Dataset schema (one value per field, per row):

  code                     string (82 to 54.1k characters)
  code_codestyle           int64 (0 to 699)
  style_context            string (111 to 35.6k characters)
  style_context_codestyle  int64 (0 to 699)
  label                    int64 (0 or 1)
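For orientation, a minimal sketch of how rows with this schema would be read with the `datasets` library; the repo id "user/code-style-pairs" is a placeholder, not the actual dataset identifier (which is not given here):

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id -- substitute the real dataset repo.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # raw source string stored in the `code` column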
Row 1

code:

from __future__ import annotations

__a = 'Muhammad Umer Farooq'
__a = 'MIT'
__a = '1.0.0'
__a = 'Muhammad Umer Farooq'
__a = '[email protected]'
__a = 'Alpha'

import re
from html.parser import HTMLParser
from urllib import parse

import requests

class __a(_a):
    """simple docstring"""

    def __init__(self, _SCREAMING_SNAKE_CASE) -> None:
        super().__init__()
        UpperCAmelCase_: list[str] = []
        UpperCAmelCase_: Optional[Any] = domain

    def a__(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        UpperCAmelCase_: Any = parse.urljoin(self.domain, _SCREAMING_SNAKE_CASE)
                        self.urls.append(_SCREAMING_SNAKE_CASE)

def lowerCamelCase__(_lowercase):
    '''simple docstring'''
    return ".".join(get_sub_domain_name(_lowercase).split('''.''')[-2:])

def lowerCamelCase__(_lowercase):
    '''simple docstring'''
    return parse.urlparse(_lowercase).netloc

def lowerCamelCase__(_lowercase="https://github.com"):
    '''simple docstring'''
    UpperCAmelCase_: Optional[Any] = get_domain_name(_lowercase)

    # Initialize the parser
    UpperCAmelCase_: Tuple = Parser(_lowercase)

    try:
        # Open URL
        UpperCAmelCase_: List[Any] = requests.get(_lowercase)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        UpperCAmelCase_: Optional[Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                UpperCAmelCase_: Union[str, Any] = requests.get(_lowercase)
                # Get the valid email.
                UpperCAmelCase_: Any = re.findall('''[a-zA-Z0-9]+@''' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_lowercase)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_lowercase)

if __name__ == "__main__":
    __a = emails_from_url('https://github.com')
    print(F"""{len(emails)} emails found:""")
    print('\n'.join(sorted(emails)))

code_codestyle: 30

style_context:

from __future__ import annotations

import math

__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'

def lowerCamelCase__(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase):
    '''simple docstring'''
    if not all(isinstance(_lowercase, (float, int)) for val in locals().values()):
        UpperCAmelCase_: Optional[int] = f'''Input values must either be float or int: {list(locals().values())}'''
        raise TypeError(_lowercase)
    UpperCAmelCase_: Tuple = ((x * distance) / (z + distance)) * scale
    UpperCAmelCase_: str = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y

def lowerCamelCase__(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase):
    '''simple docstring'''
    if not isinstance(_lowercase, _lowercase):
        raise TypeError('''Axis must be a str''')
    UpperCAmelCase_: Optional[Any] = locals()
    del input_variables["axis"]
    if not all(isinstance(_lowercase, (float, int)) for val in input_variables.values()):
        UpperCAmelCase_: List[Any] = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values())}'''
        )
        raise TypeError(_lowercase)
    UpperCAmelCase_: Dict = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        UpperCAmelCase_: Optional[int] = x * math.cos(_lowercase) - y * math.sin(_lowercase)
        UpperCAmelCase_: List[Any] = y * math.cos(_lowercase) + x * math.sin(_lowercase)
        UpperCAmelCase_: Optional[int] = z
    elif axis == "x":
        UpperCAmelCase_: Any = y * math.cos(_lowercase) - z * math.sin(_lowercase)
        UpperCAmelCase_: int = z * math.cos(_lowercase) + y * math.sin(_lowercase)
        UpperCAmelCase_: Dict = x
    elif axis == "y":
        UpperCAmelCase_: Union[str, Any] = x * math.cos(_lowercase) - z * math.sin(_lowercase)
        UpperCAmelCase_: Optional[int] = z * math.cos(_lowercase) + x * math.sin(_lowercase)
        UpperCAmelCase_: Optional[int] = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")

style_context_codestyle: 30
label: 1
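The style_context cell above implements a perspective projection (x' = x * d / (z + d) * s) and per-axis rotations, with masked identifiers. A small illustrative sketch of the same projection formula, with names invented here for readability (they are not the names stored in the dataset row):

import math

def project_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    # Perspective divide: points farther along z are pulled toward the origin.
    factor = distance / (z + distance) * scale
    return x * factor, y * factor

def rotate_about_z(x: float, y: float, z: float, angle_rad: float) -> tuple[float, float, float]:
    # Plane rotation in xy; z is unchanged.
    new_x = x * math.cos(angle_rad) - y * math.sin(angle_rad)
    new_y = y * math.cos(angle_rad) + x * math.sin(angle_rad)
    return new_x, new_y, z

print(project_to_2d(1.0, 2.0, 3.0, scale=10.0, distance=10.0))  # (7.692..., 15.384...)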
Row 2

code:

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

__a = logging.get_logger(__name__)

__a = {
    'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}

class __a(_a):
    """simple docstring"""

    lowerCAmelCase = '''wav2vec2'''

    def __init__(
        self, _SCREAMING_SNAKE_CASE=32, _SCREAMING_SNAKE_CASE=768, _SCREAMING_SNAKE_CASE=12, _SCREAMING_SNAKE_CASE=12,
        _SCREAMING_SNAKE_CASE=3_072, _SCREAMING_SNAKE_CASE="gelu", _SCREAMING_SNAKE_CASE=0.1, _SCREAMING_SNAKE_CASE=0.1,
        _SCREAMING_SNAKE_CASE=0.1, _SCREAMING_SNAKE_CASE=0.0, _SCREAMING_SNAKE_CASE=0.0, _SCREAMING_SNAKE_CASE=0.1,
        _SCREAMING_SNAKE_CASE=0.1, _SCREAMING_SNAKE_CASE=0.02, _SCREAMING_SNAKE_CASE=1e-5, _SCREAMING_SNAKE_CASE="group",
        _SCREAMING_SNAKE_CASE="gelu", _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512),
        _SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2), _SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2),
        _SCREAMING_SNAKE_CASE=False, _SCREAMING_SNAKE_CASE=128, _SCREAMING_SNAKE_CASE=16, _SCREAMING_SNAKE_CASE=False,
        _SCREAMING_SNAKE_CASE=True, _SCREAMING_SNAKE_CASE=0.05, _SCREAMING_SNAKE_CASE=10, _SCREAMING_SNAKE_CASE=2,
        _SCREAMING_SNAKE_CASE=0.0, _SCREAMING_SNAKE_CASE=10, _SCREAMING_SNAKE_CASE=0, _SCREAMING_SNAKE_CASE=320,
        _SCREAMING_SNAKE_CASE=2, _SCREAMING_SNAKE_CASE=0.1, _SCREAMING_SNAKE_CASE=100, _SCREAMING_SNAKE_CASE=256,
        _SCREAMING_SNAKE_CASE=256, _SCREAMING_SNAKE_CASE=0.1, _SCREAMING_SNAKE_CASE="sum", _SCREAMING_SNAKE_CASE=False,
        _SCREAMING_SNAKE_CASE=False, _SCREAMING_SNAKE_CASE=256, _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500),
        _SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1), _SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1), _SCREAMING_SNAKE_CASE=512,
        _SCREAMING_SNAKE_CASE=0, _SCREAMING_SNAKE_CASE=1, _SCREAMING_SNAKE_CASE=2, _SCREAMING_SNAKE_CASE=False,
        _SCREAMING_SNAKE_CASE=3, _SCREAMING_SNAKE_CASE=2, _SCREAMING_SNAKE_CASE=3, _SCREAMING_SNAKE_CASE=None,
        _SCREAMING_SNAKE_CASE=None, **_SCREAMING_SNAKE_CASE,
    ) -> Optional[int]:
        super().__init__(**_SCREAMING_SNAKE_CASE, pad_token_id=_SCREAMING_SNAKE_CASE, bos_token_id=_SCREAMING_SNAKE_CASE, eos_token_id=_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: int = hidden_size
        UpperCAmelCase_: Tuple = feat_extract_norm
        UpperCAmelCase_: List[Any] = feat_extract_activation
        UpperCAmelCase_: str = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Any = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Tuple = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Dict = conv_bias
        UpperCAmelCase_: str = num_conv_pos_embeddings
        UpperCAmelCase_: Any = num_conv_pos_embedding_groups
        UpperCAmelCase_: Tuple = len(self.conv_dim)
        UpperCAmelCase_: Union[str, Any] = num_hidden_layers
        UpperCAmelCase_: Dict = intermediate_size
        UpperCAmelCase_: Any = hidden_act
        UpperCAmelCase_: Any = num_attention_heads
        UpperCAmelCase_: str = hidden_dropout
        UpperCAmelCase_: int = attention_dropout
        UpperCAmelCase_: Tuple = activation_dropout
        UpperCAmelCase_: List[str] = feat_proj_dropout
        UpperCAmelCase_: int = final_dropout
        UpperCAmelCase_: Union[str, Any] = layerdrop
        UpperCAmelCase_: Optional[Any] = layer_norm_eps
        UpperCAmelCase_: str = initializer_range
        UpperCAmelCase_: List[str] = vocab_size
        UpperCAmelCase_: Optional[int] = do_stable_layer_norm
        UpperCAmelCase_: Optional[int] = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCAmelCase_: Optional[int] = apply_spec_augment
        UpperCAmelCase_: Tuple = mask_time_prob
        UpperCAmelCase_: Optional[Any] = mask_time_length
        UpperCAmelCase_: Union[str, Any] = mask_time_min_masks
        UpperCAmelCase_: Optional[Any] = mask_feature_prob
        UpperCAmelCase_: str = mask_feature_length
        UpperCAmelCase_: Dict = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        UpperCAmelCase_: Union[str, Any] = num_codevectors_per_group
        UpperCAmelCase_: Any = num_codevector_groups
        UpperCAmelCase_: Union[str, Any] = contrastive_logits_temperature
        UpperCAmelCase_: List[str] = feat_quantizer_dropout
        UpperCAmelCase_: Dict = num_negatives
        UpperCAmelCase_: List[str] = codevector_dim
        UpperCAmelCase_: List[str] = proj_codevector_dim
        UpperCAmelCase_: str = diversity_loss_weight

        # ctc loss
        UpperCAmelCase_: List[Any] = ctc_loss_reduction
        UpperCAmelCase_: List[str] = ctc_zero_infinity

        # adapter
        UpperCAmelCase_: Optional[Any] = add_adapter
        UpperCAmelCase_: Any = adapter_kernel_size
        UpperCAmelCase_: Optional[int] = adapter_stride
        UpperCAmelCase_: List[Any] = num_adapter_layers
        UpperCAmelCase_: Optional[Any] = output_hidden_size or hidden_size
        UpperCAmelCase_: Optional[int] = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        UpperCAmelCase_: List[str] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        UpperCAmelCase_: List[str] = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: int = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Dict = list(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Any = xvector_output_dim

    @property
    def a__(self) -> Any:
        return functools.reduce(operator.mul, self.conv_stride, 1)

code_codestyle: 30

style_context:

# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__a = '2.13.1'

import platform

import pyarrow
from packaging import version

if version.parse(platform.python_version()) < version.parse('3.7'):
    raise ImportWarning(
        'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
    )
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
        'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

__a = concatenate_datasets
__a = DownloadConfig
__a = DownloadManager
__a = DownloadMode
__a = DownloadConfig
__a = DownloadMode
__a = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager

style_context_codestyle: 30
label: 1
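The code cell in this row is a masked copy of the Hugging Face Wav2Vec2 configuration class. Assuming the real `transformers` API, a short usage sketch of the unmasked equivalent:

from transformers import Wav2Vec2Config

# Default values correspond to the base wav2vec 2.0 architecture.
config = Wav2Vec2Config()
# The constructor enforces len(conv_dim) == len(conv_stride) == len(conv_kernel).
print(len(config.conv_dim), len(config.conv_stride), len(config.conv_kernel))  # 7 7 7
print(config.hidden_size, config.num_hidden_layers)  # 768 12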
Row 3

code:

import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration

__a = [
    # tf -> hf
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
    ('beta', 'bias'),
    ('gamma', 'weight'),
    ('pegasus', 'model'),
]
__a = [
    ('.output.dense', '.fc2'),
    ('intermediate.LayerNorm', 'final_layer_norm'),
    ('intermediate.dense', 'fc1'),
]
__a = (
    INIT_COMMON
    + [
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.out_proj'),
        ('attention.self', 'self_attn'),
        ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
        ('attention.encdec_output.dense', 'encoder_attn.out_proj'),
        ('attention.encdec', 'encoder_attn'),
        ('key', 'k_proj'),
        ('value', 'v_proj'),
        ('query', 'q_proj'),
        ('decoder.LayerNorm', 'decoder.layernorm_embedding'),
    ]
    + END_COMMON
)
__a = (
    INIT_COMMON
    + [
        ('embeddings.word_embeddings', 'shared.weight'),
        ('embeddings.position_embeddings', 'embed_positions.weight'),
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.output'),
        ('attention.self', 'self_attn.self'),
        ('encoder.LayerNorm', 'encoder.layernorm_embedding'),
    ]
    + END_COMMON
)
__a = [
    'encdec/key/bias',
    'encdec/query/bias',
    'encdec/value/bias',
    'self/key/bias',
    'self/query/bias',
    'self/value/bias',
    'encdec_output/dense/bias',
    'attention/output/dense/bias',
]

def lowerCamelCase__(_lowercase, _lowercase):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        UpperCAmelCase_: int = k.replace(_lowercase, _lowercase)
    return k

def lowerCamelCase__(_lowercase, _lowercase):
    '''simple docstring'''
    UpperCAmelCase_: Union[str, Any] = BigBirdPegasusConfig(**_lowercase)
    UpperCAmelCase_: Union[str, Any] = BigBirdPegasusForConditionalGeneration(_lowercase)
    UpperCAmelCase_: Union[str, Any] = torch_model.state_dict()
    UpperCAmelCase_: Dict = {}

    # separating decoder weights
    UpperCAmelCase_: Any = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''')}
    UpperCAmelCase_: Optional[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''')}

    for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion'''):
        UpperCAmelCase_: Optional[Any] = [k.endswith(_lowercase) for ending in KEYS_TO_IGNORE]
        if any(_lowercase):
            continue
        UpperCAmelCase_: Optional[int] = DECODER_PATTERNS
        UpperCAmelCase_: Union[str, Any] = rename_state_dict_key(_lowercase, _lowercase)
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
            UpperCAmelCase_: int = v.T
        UpperCAmelCase_: List[Any] = torch.from_numpy(_lowercase)
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion'''):
        UpperCAmelCase_: Optional[int] = [k.endswith(_lowercase) for ending in KEYS_TO_IGNORE]
        if any(_lowercase):
            continue
        UpperCAmelCase_: str = REMAINING_PATTERNS
        UpperCAmelCase_: Union[str, Any] = rename_state_dict_key(_lowercase, _lowercase)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''')
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
            UpperCAmelCase_: int = v.T
        UpperCAmelCase_: Tuple = torch.from_numpy(_lowercase)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''

    UpperCAmelCase_: Tuple = mapping['''model.embed_positions.weight''']
    UpperCAmelCase_: str = mapping.pop('''model.embed_positions.weight''')
    UpperCAmelCase_, UpperCAmelCase_: List[Any] = torch_model.load_state_dict(_lowercase, strict=_lowercase)
    UpperCAmelCase_: str = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model

def lowerCamelCase__(_lowercase):
    '''simple docstring'''
    UpperCAmelCase_: List[str] = tf.train.list_variables(_lowercase)
    UpperCAmelCase_: List[Any] = {}
    UpperCAmelCase_: List[str] = ['''global_step''']
    for name, shape in tqdm(_lowercase, desc='''converting tf checkpoint to dict'''):
        UpperCAmelCase_: Any = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        UpperCAmelCase_: Tuple = tf.train.load_variable(_lowercase, _lowercase)
        UpperCAmelCase_: List[Any] = array
    return tf_weights

def lowerCamelCase__(_lowercase, _lowercase, _lowercase):
    '''simple docstring'''
    UpperCAmelCase_: Dict = get_tf_weights_as_numpy(_lowercase)
    UpperCAmelCase_: Any = convert_bigbird_pegasus(_lowercase, _lowercase)
    torch_model.save_pretrained(_lowercase)

if __name__ == "__main__":
    __a = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    __a = parser.parse_args()
    __a = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)

code_codestyle: 30

style_context:

def lowerCamelCase__(_lowercase, _lowercase):
    '''simple docstring'''
    while a != 0:
        UpperCAmelCase_, UpperCAmelCase_: Optional[int] = b % a, a
    return b

def lowerCamelCase__(_lowercase, _lowercase):
    '''simple docstring'''
    if gcd(_lowercase, _lowercase) != 1:
        UpperCAmelCase_: int = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(_lowercase)
    UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Union[str, Any] = 1, 0, a
    UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Dict = 0, 1, m
    while va != 0:
        UpperCAmelCase_: List[Any] = ua // va
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Any = (
            (ua - q * va),
            (ua - q * va),
            (ua - q * va),
            va,
            va,
            va,
        )
    return ua % m

style_context_codestyle: 30
label: 1
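The style_context cell in this row is an extended-Euclid modular inverse with masked names. A readable sketch of the same algorithm, cross-checked against the built-in pow(a, -1, m) available since Python 3.8:

def mod_inverse(a: int, m: int) -> int:
    # Iterative extended Euclid: the invariant u3 == u1*a + u2*m holds throughout,
    # so when u3 becomes gcd(a, m) == 1, u1 is the inverse of a modulo m.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    if u3 != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    return u1 % m

assert mod_inverse(7, 26) == pow(7, -1, 26) == 15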
Row 4

code:

from collections.abc import Callable

import numpy as np

def lowerCamelCase__(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase):
    '''simple docstring'''
    UpperCAmelCase_: Optional[Any] = int(np.ceil((x_end - xa) / step_size))
    UpperCAmelCase_: Dict = np.zeros((n + 1,))
    UpperCAmelCase_: str = ya
    UpperCAmelCase_: Tuple = xa
    for k in range(_lowercase):
        UpperCAmelCase_: Optional[int] = y[k] + step_size * ode_func(_lowercase, y[k])
        UpperCAmelCase_: Optional[Any] = y[k] + (
            (step_size / 2) * (ode_func(_lowercase, y[k]) + ode_func(x + step_size, _lowercase))
        )
        x += step_size
    return y

if __name__ == "__main__":
    import doctest

    doctest.testmod()

code_codestyle: 30

style_context:

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool

if is_vision_available():
    from PIL import Image

class __a(_a):
    """simple docstring"""

    lowerCAmelCase = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
    lowerCAmelCase = '''image_segmenter'''
    lowerCAmelCase = CLIPSegForImageSegmentation
    lowerCAmelCase = ['''image''', '''text''']
    lowerCAmelCase = ['''image''']

    def __init__(self, *_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE) -> Dict:
        requires_backends(self, ['''vision'''])
        super().__init__(*_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE)

    def a__(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) -> Tuple:
        return self.pre_processor(text=[label], images=[image], padding=_SCREAMING_SNAKE_CASE, return_tensors='''pt''')

    def a__(self, _SCREAMING_SNAKE_CASE) -> str:
        with torch.no_grad():
            UpperCAmelCase_: Dict = self.model(**_SCREAMING_SNAKE_CASE).logits
        return logits

    def a__(self, _SCREAMING_SNAKE_CASE) -> Dict:
        UpperCAmelCase_: Dict = outputs.cpu().detach().numpy()
        UpperCAmelCase_: Any = 0
        UpperCAmelCase_: List[Any] = 1
        return Image.fromarray((array * 255).astype(np.uinta))

style_context_codestyle: 30
label: 1
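The code cell in this row is Heun's method (the "modified Euler" predictor-corrector scheme). A compact sketch with descriptive names, checked against the exact solution of y' = y:

import math

def heun(f, y0: float, x0: float, h: float, n_steps: int) -> float:
    # Predictor (forward Euler step), then corrector (average of the endpoint slopes).
    x, y = x0, y0
    for _ in range(n_steps):
        y_pred = y + h * f(x, y)
        y = y + (h / 2) * (f(x, y) + f(x + h, y_pred))
        x += h
    return y

# dy/dx = y with y(0) = 1 has the exact solution e**x.
approx = heun(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, n_steps=100)
print(approx, math.e)  # ~2.71827 vs 2.71828...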
Row 5

code:

import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast

class __a(unittest.TestCase):
    """simple docstring"""

    def a__(self) -> Union[str, Any]:
        UpperCAmelCase_: Tuple = 0

    @slow
    def a__(self) -> Any:
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            UpperCAmelCase_: Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(_SCREAMING_SNAKE_CASE), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            UpperCAmelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(_SCREAMING_SNAKE_CASE), 0)

    def a__(self) -> Optional[Any]:
        UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def a__(self) -> Tuple:
        UpperCAmelCase_: Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def a__(self) -> List[str]:
        UpperCAmelCase_: int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        # Check that tokenizer_type ≠ model_type
        UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, config=_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def a__(self) -> Dict:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''', os.path.join(_SCREAMING_SNAKE_CASE, '''vocab.txt'''))
            UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, tokenizer_type='''bert''', use_fast=_SCREAMING_SNAKE_CASE)
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''', os.path.join(_SCREAMING_SNAKE_CASE, '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''', os.path.join(_SCREAMING_SNAKE_CASE, '''merges.txt'''))
            UpperCAmelCase_: Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, tokenizer_type='''gpt2''', use_fast=_SCREAMING_SNAKE_CASE)
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

    @require_tokenizers
    def a__(self) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''', os.path.join(_SCREAMING_SNAKE_CASE, '''vocab.txt'''))
            UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, tokenizer_type='''bert''')
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''', os.path.join(_SCREAMING_SNAKE_CASE, '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''', os.path.join(_SCREAMING_SNAKE_CASE, '''merges.txt'''))
            UpperCAmelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, tokenizer_type='''gpt2''')
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

    def a__(self) -> int:
        with pytest.raises(_SCREAMING_SNAKE_CASE):
            AutoTokenizer.from_pretrained('''./''', tokenizer_type='''xxx''')

    @require_tokenizers
    def a__(self) -> Optional[Any]:
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            UpperCAmelCase_: Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, (BertTokenizer, BertTokenizerFast))
            if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, _SCREAMING_SNAKE_CASE)
            else:
                self.assertEqual(tokenizer.do_lower_case, _SCREAMING_SNAKE_CASE)
            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def a__(self) -> List[Any]:
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                _SCREAMING_SNAKE_CASE,
                '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''',
            ):
                UpperCAmelCase_: int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')

    def a__(self) -> Optional[Any]:
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        UpperCAmelCase_: int = TOKENIZER_MAPPING.values()
        UpperCAmelCase_: List[Any] = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(_SCREAMING_SNAKE_CASE)

    @require_tokenizers
    def a__(self) -> Tuple:
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=_SCREAMING_SNAKE_CASE), _SCREAMING_SNAKE_CASE)
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased'''), _SCREAMING_SNAKE_CASE)

    @require_tokenizers
    def a__(self) -> Optional[int]:
        UpperCAmelCase_: str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''', do_lower_case=_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: int = '''Hello, world. How are you?'''
        UpperCAmelCase_: List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE)
        self.assertEqual('''[UNK]''', tokens[0])
        UpperCAmelCase_: Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''', do_lower_case=_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE)
        self.assertEqual('''[UNK]''', tokens[0])

    @require_tokenizers
    def a__(self) -> Dict:
        UpperCAmelCase_: List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''')
        self.assertEqual(type(_SCREAMING_SNAKE_CASE), _SCREAMING_SNAKE_CASE)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, '''[UNK]''')
        self.assertEqual(tokenizer.padding_side, '''right''')
        self.assertEqual(tokenizer.truncation_side, '''right''')

    def a__(self) -> Dict:
        UpperCAmelCase_: Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
            UpperCAmelCase_: Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, tokenizer.__class__)
        self.assertEqual(tokenizera.vocab_size, 12)

    def a__(self) -> Optional[Any]:
        UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained('''ctrl''')
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

    def a__(self) -> str:
        # Check we can load the tokenizer config of an online model.
        UpperCAmelCase_: int = get_tokenizer_config('''bert-base-cased''')
        UpperCAmelCase_: Optional[int] = config.pop('''_commit_hash''', _SCREAMING_SNAKE_CASE)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(_SCREAMING_SNAKE_CASE, {'''do_lower_case''': False})
        # This model does not have a tokenizer_config so we get back an empty dict.
        UpperCAmelCase_: Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE)
        self.assertDictEqual(_SCREAMING_SNAKE_CASE, {})
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        UpperCAmelCase_: Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
            UpperCAmelCase_: Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE)
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''], '''BertTokenizer''')

    def a__(self) -> str:
        try:
            AutoConfig.register('''custom''', _SCREAMING_SNAKE_CASE)
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE, slow_tokenizer_class=_SCREAMING_SNAKE_CASE)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_SCREAMING_SNAKE_CASE):
                AutoTokenizer.register(_SCREAMING_SNAKE_CASE, slow_tokenizer_class=_SCREAMING_SNAKE_CASE)
            UpperCAmelCase_: str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                UpperCAmelCase_: Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def a__(self) -> int:
        try:
            AutoConfig.register('''custom''', _SCREAMING_SNAKE_CASE)
            # Can register in two steps
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE, slow_tokenizer_class=_SCREAMING_SNAKE_CASE)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE, fast_tokenizer_class=_SCREAMING_SNAKE_CASE)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                _SCREAMING_SNAKE_CASE, slow_tokenizer_class=_SCREAMING_SNAKE_CASE, fast_tokenizer_class=_SCREAMING_SNAKE_CASE
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_SCREAMING_SNAKE_CASE):
                AutoTokenizer.register(_SCREAMING_SNAKE_CASE, fast_tokenizer_class=_SCREAMING_SNAKE_CASE)
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                UpperCAmelCase_: List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE)
                bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                UpperCAmelCase_: str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
                self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                UpperCAmelCase_: Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE)
                self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def a__(self) -> Optional[int]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_SCREAMING_SNAKE_CASE):
            UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_SCREAMING_SNAKE_CASE):
            UpperCAmelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE
            )
        UpperCAmelCase_: Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
            UpperCAmelCase_: int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, trust_remote_code=_SCREAMING_SNAKE_CASE)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''')
            self.assertEqual(reloaded_tokenizer.__class__.__name__, '''NewTokenizerFast''')
            # Test we can also load the slow version
            UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, trust_remote_code=_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, '''NewTokenizer''')
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
            self.assertEqual(reloaded_tokenizer.__class__.__name__, '''NewTokenizer''')

    @require_tokenizers
    def a__(self) -> Optional[int]:
        class __a(_a):
            """simple docstring"""
            lowerCAmelCase = False

        class __a(_a):
            """simple docstring"""
            lowerCAmelCase = NewTokenizer
            lowerCAmelCase = False

        try:
            AutoConfig.register('''custom''', _SCREAMING_SNAKE_CASE)
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE, slow_tokenizer_class=_SCREAMING_SNAKE_CASE)
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE, fast_tokenizer_class=_SCREAMING_SNAKE_CASE)
            # If remote code is not set, the default is to use local
            UpperCAmelCase_: int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''')
            self.assertFalse(tokenizer.special_attribute_present)
            UpperCAmelCase_: Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''', use_fast=_SCREAMING_SNAKE_CASE)
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local one.
            UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE
            )
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''')
            self.assertFalse(tokenizer.special_attribute_present)
            UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE
            )
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
            self.assertFalse(tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub
            UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE
            )
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''')
            self.assertTrue(tokenizer.special_attribute_present)
            UpperCAmelCase_: Any = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''', trust_remote_code=_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE
            )
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def a__(self) -> int:
        UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''', trust_remote_code=_SCREAMING_SNAKE_CASE
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''')
            # Test we can also load the slow version
            UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''', trust_remote_code=_SCREAMING_SNAKE_CASE, use_fast=_SCREAMING_SNAKE_CASE
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')
        else:
            self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''')

    def a__(self) -> Optional[Any]:
        with self.assertRaisesRegex(
            _SCREAMING_SNAKE_CASE, '''bert-base is not a local folder and is not a valid model identifier'''
        ):
            UpperCAmelCase_: Tuple = AutoTokenizer.from_pretrained('''bert-base''')

    def a__(self) -> List[Any]:
        with self.assertRaisesRegex(
            _SCREAMING_SNAKE_CASE, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''
        ):
            UpperCAmelCase_: str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE, revision='''aaaaaa''')

    def a__(self) -> Any:
        # Make sure we have cached the tokenizer.
        UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        with RequestCounter() as counter:
            UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

code_codestyle: 30

style_context:

import numpy as np

import datasets

__a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __a(datasets.Metric):
    """simple docstring"""

    def a__(self) -> Union[str, Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''', id='''sequence'''), id='''X'''),
                }
            ),
        )

    def a__(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) -> Any:
        # convert to numpy arrays
        UpperCAmelCase_: str = np.array(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: List[Any] = np.array(_SCREAMING_SNAKE_CASE)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension'''
            )

        # Get mahalanobis distance for each prediction
        UpperCAmelCase_: List[str] = X - np.mean(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Dict = np.cov(reference_distribution.T)
        try:
            UpperCAmelCase_: Any = np.linalg.inv(_SCREAMING_SNAKE_CASE)
        except np.linalg.LinAlgError:
            UpperCAmelCase_: List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: List[Any] = np.dot(_SCREAMING_SNAKE_CASE, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}

style_context_codestyle: 30
label: 1
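The style_context metric above boils down to a plain numpy computation. A minimal sketch of the same Mahalanobis distance outside the `datasets.Metric` wrapper (here using a per-column mean of the reference distribution):

import numpy as np

def mahalanobis(X: np.ndarray, reference: np.ndarray) -> np.ndarray:
    # Squared Mahalanobis distance of each row of X from the reference distribution.
    delta = X - reference.mean(axis=0)
    cov_inv = np.linalg.pinv(np.cov(reference.T))  # pinv tolerates singular covariance
    return np.einsum("ij,jk,ik->i", delta, cov_inv, delta)

print(mahalanobis(np.array([[0.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 0.0]])))  # [0.5]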
Row 6

code:

def lowerCamelCase__():
    '''simple docstring'''
    UpperCAmelCase_: Dict = 0
    for i in range(1, 1001):
        total += i**i
    return str(_lowercase)[-10:]

if __name__ == "__main__":
    print(solution())

code_codestyle: 30

style_context:

import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer

__a = logging.get_logger(__name__)

__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

__a = {
    'vocab_file': {
        'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-ctx_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-ctx_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-ctx_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}
__a = {
    'vocab_file': {
        'facebook/dpr-question_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-question_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-question_encoder-single-nq-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-question_encoder-multiset-base': (
            'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}
__a = {
    'vocab_file': {
        'facebook/dpr-reader-single-nq-base': (
            'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
        ),
        'facebook/dpr-reader-multiset-base': (
            'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'facebook/dpr-reader-single-nq-base': (
            'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
        ),
        'facebook/dpr-reader-multiset-base': (
            'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
        ),
    },
}

__a = {
    'facebook/dpr-ctx_encoder-single-nq-base': 512,
    'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
    'facebook/dpr-question_encoder-single-nq-base': 512,
    'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
    'facebook/dpr-reader-single-nq-base': 512,
    'facebook/dpr-reader-multiset-base': 512,
}

__a = {
    'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
    'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
    'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
    'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}

class __a(_a):
    """simple docstring"""
    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION

class __a(_a):
    """simple docstring"""
    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION

__a = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

__a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '

@add_start_docstrings(_a)
class __a:
    """simple docstring"""

    def __call__(
        self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE=None, _SCREAMING_SNAKE_CASE=None, _SCREAMING_SNAKE_CASE=False,
        _SCREAMING_SNAKE_CASE=False, _SCREAMING_SNAKE_CASE=None, _SCREAMING_SNAKE_CASE=None, _SCREAMING_SNAKE_CASE=None,
        **_SCREAMING_SNAKE_CASE,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                _SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, truncation=_SCREAMING_SNAKE_CASE,
                max_length=_SCREAMING_SNAKE_CASE, return_tensors=_SCREAMING_SNAKE_CASE,
                return_attention_mask=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE,
            )
        elif titles is None or texts is None:
            UpperCAmelCase_: List[str] = titles if texts is None else texts
            return super().__call__(
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE,
                truncation=_SCREAMING_SNAKE_CASE, max_length=_SCREAMING_SNAKE_CASE,
                return_tensors=_SCREAMING_SNAKE_CASE, return_attention_mask=_SCREAMING_SNAKE_CASE,
                **_SCREAMING_SNAKE_CASE,
            )
        UpperCAmelCase_: List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else [titles]
        UpperCAmelCase_: List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else [texts]
        UpperCAmelCase_: Any = len(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else [questions] * n_passages
        if len(_SCREAMING_SNAKE_CASE) != len(_SCREAMING_SNAKE_CASE):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE)} titles and {len(_SCREAMING_SNAKE_CASE)} texts.'''
            )
        UpperCAmelCase_: Tuple = super().__call__(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, truncation=_SCREAMING_SNAKE_CASE)['''input_ids''']
        UpperCAmelCase_: int = super().__call__(_SCREAMING_SNAKE_CASE, add_special_tokens=_SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, truncation=_SCREAMING_SNAKE_CASE)['''input_ids''']
        UpperCAmelCase_: Optional[int] = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
            ]
        }
        if return_attention_mask is not False:
            UpperCAmelCase_: List[str] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            UpperCAmelCase_: Dict = attention_mask
        return self.pad(_SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, max_length=_SCREAMING_SNAKE_CASE, return_tensors=_SCREAMING_SNAKE_CASE)

    def a__(
        self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE=16, _SCREAMING_SNAKE_CASE=64,
        _SCREAMING_SNAKE_CASE=4,
    ) -> List[DPRSpanPrediction]:
        UpperCAmelCase_: Tuple = reader_input['''input_ids''']
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_: Optional[Any] = reader_output[:3]
        UpperCAmelCase_: Optional[Any] = len(_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: int = sorted(range(_SCREAMING_SNAKE_CASE), reverse=_SCREAMING_SNAKE_CASE, key=relevance_logits.__getitem__)
        UpperCAmelCase_: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            UpperCAmelCase_: List[Any] = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            UpperCAmelCase_: str = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                UpperCAmelCase_: List[Any] = sequence_ids.index(self.pad_token_id)
            else:
                UpperCAmelCase_: int = len(_SCREAMING_SNAKE_CASE)
            UpperCAmelCase_: Tuple = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=_SCREAMING_SNAKE_CASE,
                top_spans=_SCREAMING_SNAKE_CASE,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=_SCREAMING_SNAKE_CASE,
                        start_index=_SCREAMING_SNAKE_CASE,
                        end_index=_SCREAMING_SNAKE_CASE,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(_SCREAMING_SNAKE_CASE) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def a__(
        self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE,
    ) -> List[DPRSpanPrediction]:
        UpperCAmelCase_: Tuple = []
        for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        UpperCAmelCase_: int = sorted(_SCREAMING_SNAKE_CASE, key=lambda _SCREAMING_SNAKE_CASE: x[1], reverse=_SCREAMING_SNAKE_CASE)
        UpperCAmelCase_: Union[str, Any] = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
            UpperCAmelCase_: str = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(_SCREAMING_SNAKE_CASE) == top_spans:
                break
        return chosen_span_intervals

@add_end_docstrings(_a)
class __a(_a, _a):
    """simple docstring"""
    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    lowerCAmelCase = ['''input_ids''', '''attention_mask''']

style_context_codestyle: 30
label: 1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __a = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = emb.weight.shape UpperCAmelCase_ : Union[str, Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase_ : str = emb.weight.data return lin_layer def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Dict = torch.load(_lowercase , map_location='''cpu''' ) UpperCAmelCase_ : Tuple = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] UpperCAmelCase_ : int = mam_aaa['''model'''] remove_ignore_keys_(_lowercase ) UpperCAmelCase_ : Optional[int] = state_dict['''encoder.embed_tokens.weight'''].shape[0] UpperCAmelCase_ : Any = MaMaaaConfig( vocab_size=_lowercase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) UpperCAmelCase_ : List[str] = state_dict['''decoder.embed_tokens.weight'''] UpperCAmelCase_ : Union[str, Any] = MaMaaaForConditionalGeneration(_lowercase ) model.model.load_state_dict(_lowercase , strict=_lowercase ) UpperCAmelCase_ : Any = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') __a = parser.parse_args() __a = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
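# --- Editor's note (not part of the original file). A hedged invocation
# sketch for the conversion script above; the script filename is an
# assumption, the two positional arguments are exactly those declared in the
# argparse block:
#
#   python convert_m2m100_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./m2m100-converted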
30
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __a( _a ): """simple docstring""" lowerCAmelCase = '''wav2vec2''' def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : List[Any] = feat_extract_activation UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = conv_bias UpperCAmelCase_ : str = num_conv_pos_embeddings UpperCAmelCase_ : Any = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = hidden_dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : List[str] = feat_proj_dropout UpperCAmelCase_ : int = final_dropout UpperCAmelCase_ : Union[str, Any] = layerdrop UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Optional[int] = do_stable_layer_norm UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : Optional[Any] = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : str = mask_feature_length UpperCAmelCase_ : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group UpperCAmelCase_ : Any = num_codevector_groups UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature UpperCAmelCase_ : List[str] = feat_quantizer_dropout UpperCAmelCase_ : Dict = num_negatives UpperCAmelCase_ : List[str] = codevector_dim UpperCAmelCase_ : List[str] = proj_codevector_dim UpperCAmelCase_ : str = diversity_loss_weight # ctc loss UpperCAmelCase_ : List[Any] = ctc_loss_reduction UpperCAmelCase_ : List[str] = ctc_zero_infinity # adapter UpperCAmelCase_ : Optional[Any] = add_adapter UpperCAmelCase_ : Any = adapter_kernel_size UpperCAmelCase_ : Optional[int] = adapter_stride UpperCAmelCase_ : List[Any] = num_adapter_layers UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size UpperCAmelCase_ : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = xvector_output_dim @property def a__ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
30
1
import math def lowerCamelCase__ ( _lowercase ): '''simple docstring''' assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCAmelCase_ : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def lowerCamelCase__ ( _lowercase , _lowercase=1 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = factor * value UpperCAmelCase_ : List[str] = value while not is_prime(_lowercase ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **_lowercase ) return value
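# --- Editor's usage sketch (not part of the original file). The style
# transform above mangled the helper names (both functions became
# lowerCamelCase__, while the second still calls is_prime/next_prime); this is
# a self-contained restatement of the trial-division primality check under
# readable names, so its behaviour is easy to verify:
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:  # negatives, 0, 1 and even numbers are not
        return False
    # trial division by odd candidates up to sqrt(number)
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

assert is_prime(13) and not is_prime(1) and not is_prime(100)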
30
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __a = logging.get_logger(__name__) class __a( _a ): """simple docstring""" def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None: warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
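# --- Editor's note (not part of the original file). A minimal replacement
# sketch for the deprecated class above, using the processor the warning
# recommends; the checkpoint name is an assumption:
#
#   from transformers import FlavaImageProcessor
#   image_processor = FlavaImageProcessor.from_pretrained('facebook/flava-full')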
30
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
30
1
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging __a = { 'cola': 2, 'mnli': 3, 'mrpc': 2, 'sst-2': 2, 'sts-b': 1, 'qqp': 2, 'qnli': 2, 'rte': 2, 'wnli': 2, } logging.set_verbosity_info() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=None ): '''simple docstring''' UpperCAmelCase_ : Dict = XLNetConfig.from_json_file(_lowercase ) UpperCAmelCase_ : Optional[int] = finetuning_task.lower() if finetuning_task is not None else '''''' if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' ) UpperCAmelCase_ : Optional[int] = finetuning_task UpperCAmelCase_ : Optional[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task] UpperCAmelCase_ : List[str] = XLNetForSequenceClassification(_lowercase ) elif "squad" in finetuning_task: UpperCAmelCase_ : Dict = finetuning_task UpperCAmelCase_ : List[str] = XLNetForQuestionAnswering(_lowercase ) else: UpperCAmelCase_ : List[str] = XLNetLMHeadModel(_lowercase ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(_lowercase , _lowercase , _lowercase ) # Save pytorch-model UpperCAmelCase_ : int = os.path.join(_lowercase , _lowercase ) UpperCAmelCase_ : Union[str, Any] = os.path.join(_lowercase , _lowercase ) print(f'''Save PyTorch model to {os.path.abspath(_lowercase )}''' ) torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {os.path.abspath(_lowercase )}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--xlnet_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained XLNet model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--finetuning_task', default=None, type=str, help='Name of a task on which the XLNet TensorFlow model was fine-tuned', ) __a = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
30
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
30
1
from __future__ import annotations __a = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] __a = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : List[str] = len(_lowercase ) for i in range(_lowercase ): UpperCAmelCase_ : float = -1 for j in range(i + 1 , _lowercase ): if arr[i] < arr[j]: UpperCAmelCase_ : Union[str, Any] = arr[j] break result.append(_lowercase ) return result def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = [] for i, outer in enumerate(_lowercase ): UpperCAmelCase_ : float = -1 for inner in arr[i + 1 :]: if outer < inner: UpperCAmelCase_ : List[Any] = inner break result.append(_lowercase ) return result def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = len(_lowercase ) UpperCAmelCase_ : list[float] = [] UpperCAmelCase_ : list[float] = [-1] * arr_size for index in reversed(range(_lowercase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: UpperCAmelCase_ : int = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __a = ( 'from __main__ import arr, next_greatest_element_slow, ' 'next_greatest_element_fast, next_greatest_element' ) print( 'next_greatest_element_slow():', timeit('next_greatest_element_slow(arr)', setup=setup), ) print( 'next_greatest_element_fast():', timeit('next_greatest_element_fast(arr)', setup=setup), ) print( ' next_greatest_element():', timeit('next_greatest_element(arr)', setup=setup), )
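# --- Editor's usage sketch (not part of the original file). The third
# function above is the classic monotonic-stack solution: scanning from the
# right, it pops every stacked value <= the current element, so the stack top
# is always the next strictly greater element, giving O(n) total work. A
# self-contained restatement under readable names:
def next_greater(arr: list[float]) -> list[float]:
    result = [-1.0] * len(arr)
    stack: list[float] = []
    for i in reversed(range(len(arr))):
        while stack and stack[-1] <= arr[i]:
            stack.pop()  # values <= arr[i] can never be an answer again
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

assert next_greater([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]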
30
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __a = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __a = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if bloom_config_file == "": UpperCAmelCase_ : Tuple = BloomConfig() else: UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase ) if shard_model: UpperCAmelCase_ : Any = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}} UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = BloomConfig() for j, file in enumerate(_lowercase ): print('''Processing file: {}'''.format(_lowercase ) ) UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys to the transformers names UpperCAmelCase_ : Dict = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide the weights we want to average by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in 
WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp torch.save( _lowercase , os.path.join( _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCAmelCase_ : Union[str, Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) UpperCAmelCase_ : List[Any] = BloomConfig() UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME UpperCAmelCase_ : List[str] = total_size with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) else: UpperCAmelCase_ : Any = BloomModel(_lowercase ) UpperCAmelCase_ : Tuple = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = None for i, file in enumerate(_lowercase ): UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys to the transformers names UpperCAmelCase_ : str = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Dict = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : int = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide the weights we want to average by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys ) else: UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(_lowercase , exist_ok=_lowercase ) UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __a = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
30
1
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) set_seed(770) __a = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } __a = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } __a = os.path.dirname(os.path.abspath(__file__)) __a = os.path.join(os.path.expanduser('~'), '.cache') __a = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def lowerCamelCase__ ( _lowercase , _lowercase=False ): '''simple docstring''' UpperCAmelCase_ : List[str] = model_type if use_small: key += "_small" return os.path.join(_lowercase , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' os.makedirs(_lowercase , exist_ok=_lowercase ) hf_hub_download(repo_id=_lowercase , filename=_lowercase , local_dir=_lowercase ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=False , _lowercase="text" ): '''simple docstring''' if model_type == "text": UpperCAmelCase_ : Any = BarkSemanticModel UpperCAmelCase_ : List[str] = BarkSemanticConfig UpperCAmelCase_ : Any = BarkSemanticGenerationConfig elif model_type == "coarse": UpperCAmelCase_ : Optional[int] = BarkCoarseModel UpperCAmelCase_ : str = BarkCoarseConfig UpperCAmelCase_ : int = BarkCoarseGenerationConfig elif model_type == "fine": UpperCAmelCase_ : Optional[int] = BarkFineModel UpperCAmelCase_ : Dict = BarkFineConfig UpperCAmelCase_ : Optional[int] = BarkFineGenerationConfig else: raise NotImplementedError() UpperCAmelCase_ : int = f'''{model_type}_small''' if use_small else model_type UpperCAmelCase_ : Optional[int] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(_lowercase ): logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) UpperCAmelCase_ : str = torch.load(_lowercase , map_location=_lowercase ) # this is a hack UpperCAmelCase_ : int = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: UpperCAmelCase_ : List[Any] = model_args['''vocab_size'''] UpperCAmelCase_ : Tuple = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments UpperCAmelCase_ : List[Any] = model_args.pop('''n_head''' ) UpperCAmelCase_ : Tuple = model_args.pop('''n_embd''' ) UpperCAmelCase_ : Tuple = 
model_args.pop('''n_layer''' ) UpperCAmelCase_ : List[str] = ConfigClass(**checkpoint['''model_args'''] ) UpperCAmelCase_ : Tuple = ModelClass(config=_lowercase ) UpperCAmelCase_ : Optional[Any] = GenerationConfigClass() UpperCAmelCase_ : Any = model_generation_config UpperCAmelCase_ : Union[str, Any] = checkpoint['''model'''] # fixup checkpoint UpperCAmelCase_ : str = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(_lowercase ): # replace part of the key with corresponding layer name in HF implementation UpperCAmelCase_ : Any = k[len(_lowercase ) :] for old_layer_name in new_layer_name_dict: UpperCAmelCase_ : Tuple = new_k.replace(_lowercase , new_layer_name_dict[old_layer_name] ) UpperCAmelCase_ : Dict = state_dict.pop(_lowercase ) UpperCAmelCase_ : Union[str, Any] = set(state_dict.keys() ) - set(model.state_dict().keys() ) UpperCAmelCase_ : Tuple = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} UpperCAmelCase_ : List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() ) UpperCAmelCase_ : int = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(_lowercase ) != 0: raise ValueError(f'''extra keys found: {extra_keys}''' ) if len(_lowercase ) != 0: raise ValueError(f'''missing keys: {missing_keys}''' ) model.load_state_dict(_lowercase , strict=_lowercase ) UpperCAmelCase_ : List[str] = model.num_parameters(exclude_embeddings=_lowercase ) UpperCAmelCase_ : List[str] = checkpoint['''best_val_loss'''].item() logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(_lowercase , 3 )} loss''' ) model.eval() model.to(_lowercase ) del checkpoint, state_dict return model def lowerCamelCase__ ( _lowercase , _lowercase=False , _lowercase="text" ): '''simple docstring''' if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() UpperCAmelCase_ : Tuple = '''cpu''' # do conversion on cpu UpperCAmelCase_ : Tuple = _get_ckpt_path(_lowercase , use_small=_lowercase ) UpperCAmelCase_ : Optional[int] = _load_model(_lowercase , _lowercase , model_type=_lowercase , use_small=_lowercase ) # load bark initial model UpperCAmelCase_ : Union[str, Any] = _bark_load_model(_lowercase , '''cpu''' , model_type=_lowercase , use_small=_lowercase ) if model_type == "text": UpperCAmelCase_ : List[str] = bark_model['''model'''] if model.num_parameters(exclude_embeddings=_lowercase ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model UpperCAmelCase_ : str = 5 UpperCAmelCase_ : List[str] = 10 if model_type in ["text", "coarse"]: UpperCAmelCase_ : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) UpperCAmelCase_ : int = bark_model(_lowercase )[0] UpperCAmelCase_ : int = model(_lowercase ) # take last logits UpperCAmelCase_ : str = output_new_model_total.logits[:, [-1], :] else: UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Union[str, Any] = 8 UpperCAmelCase_ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) UpperCAmelCase_ : Tuple = model(_lowercase , _lowercase ) UpperCAmelCase_ : Optional[int] = bark_model(_lowercase , _lowercase ) UpperCAmelCase_ : List[str] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - 
output_old_model).abs().max().item() > 1E-3: raise ValueError('''initial and new outputs are not equal''' ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = os.path.join(_lowercase , _lowercase ) UpperCAmelCase_ : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(_lowercase , '''config.json''' ) ) UpperCAmelCase_ : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_lowercase , '''config.json''' ) ) UpperCAmelCase_ : Tuple = BarkFineConfig.from_pretrained(os.path.join(_lowercase , '''config.json''' ) ) UpperCAmelCase_ : Dict = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) UpperCAmelCase_ : Tuple = BarkSemanticModel.from_pretrained(_lowercase ) UpperCAmelCase_ : Any = BarkCoarseModel.from_pretrained(_lowercase ) UpperCAmelCase_ : List[str] = BarkFineModel.from_pretrained(_lowercase ) UpperCAmelCase_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) UpperCAmelCase_ : int = BarkConfig.from_sub_model_configs( _lowercase , _lowercase , _lowercase , _lowercase ) UpperCAmelCase_ : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) UpperCAmelCase_ : Tuple = BarkModel(_lowercase ) UpperCAmelCase_ : Tuple = semantic UpperCAmelCase_ : List[Any] = coarseAcoustic UpperCAmelCase_ : Any = fineAcoustic UpperCAmelCase_ : List[Any] = codec UpperCAmelCase_ : int = bark_generation_config Path(_lowercase ).mkdir(exist_ok=_lowercase ) bark.save_pretrained(_lowercase , repo_id=_lowercase , push_to_hub=_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') __a = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
30
def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 for i in range(1 , 1001 ): total += i**i return str(_lowercase )[-10:] if __name__ == "__main__": print(solution())
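# --- Editor's note (not part of the original file). The loop above
# materialises every i**i exactly; a hedged sketch of the standard
# space-saving variant keeps only the last ten digits via three-argument
# pow (modular exponentiation) and yields the same final string:
MOD = 10**10
print(str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10))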
30
1
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(_lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors UpperCAmelCase_ : Tuple = load_file(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: UpperCAmelCase_ : List[Any] = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) UpperCAmelCase_ : Optional[int] = pipeline.text_encoder else: UpperCAmelCase_ : int = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) UpperCAmelCase_ : str = pipeline.unet # find the target layer UpperCAmelCase_ : str = layer_infos.pop(0 ) while len(_lowercase ) > -1: try: UpperCAmelCase_ : Optional[Any] = curr_layer.__getattr__(_lowercase ) if len(_lowercase ) > 0: UpperCAmelCase_ : List[Any] = layer_infos.pop(0 ) elif len(_lowercase ) == 0: break except Exception: if len(_lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: UpperCAmelCase_ : Dict = layer_infos.pop(0 ) UpperCAmelCase_ : Optional[int] = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(_lowercase ) else: pair_keys.append(_lowercase ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: UpperCAmelCase_ : Union[str, Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) UpperCAmelCase_ : List[str] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: UpperCAmelCase_ : Optional[Any] = state_dict[pair_keys[0]].to(torch.floataa ) UpperCAmelCase_ : List[Any] = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ) # update visited list for item in pair_keys: visited.append(_lowercase ) return pipeline if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)') __a = parser.parse_args() __a = args.base_model_path __a = args.checkpoint_path __a = args.dump_path __a = args.lora_prefix_unet __a = args.lora_prefix_text_encoder __a = args.alpha __a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __a = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
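# --- Editor's sketch (not part of the original file). Per weight pair, the
# merge loop above applies W <- W0 + alpha * (up @ down), as the --alpha help
# string states; a tiny self-contained demonstration of that rule on random
# tensors (all dimensions here are arbitrary, chosen only for illustration):
import torch

d_out, d_in, rank, alpha = 8, 8, 4, 0.75
W0 = torch.randn(d_out, d_in)
up = torch.randn(d_out, rank)    # the "lora_up" factor
down = torch.randn(rank, d_in)   # the "lora_down" factor

W = W0 + alpha * torch.mm(up, down)  # same update the loop applies in place
assert W.shape == W0.shape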
30
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __a = None __a = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __a = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class __a: """simple docstring""" lowerCAmelCase = True lowerCAmelCase = None # Automatically constructed lowerCAmelCase = "PIL.Image.Image" lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a ) def __call__( self ) -> Tuple: return self.pa_type def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": value, "bytes": None} elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": None, "bytes": value} elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_SCREAMING_SNAKE_CASE ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
        if token_per_repo_id is None:
            UpperCAmelCase_ : Dict = {}
        UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(_SCREAMING_SNAKE_CASE ):
                    UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
                else:
                    UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
                    try:
                        UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
                        UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
                    except ValueError:
                        UpperCAmelCase_ : Optional[Any] = None
                    with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
                        UpperCAmelCase_ : List[str] = BytesIO(f.read() )
                    UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
        else:
            UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image

    def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('''binary''' ),
                "path": Value('''string''' ),
            }
        )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
            UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                UpperCAmelCase_ : Dict = storage.field('''bytes''' )
            else:
                UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                UpperCAmelCase_ : int = storage.field('''path''' )
            else:
                UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            UpperCAmelCase_ : Optional[Any] = pa.array(
                [encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()]
                ,type=pa.binary() ,)
            UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
                [bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
        return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(_SCREAMING_SNAKE_CASE ):
            with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
                UpperCAmelCase_ : Any = f.read()
            return bytes_

        UpperCAmelCase_ : Union[str, Any] = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ]
            ,type=pa.binary() ,)
        UpperCAmelCase_ : List[str] = pa.array(
            [os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()]
            ,type=pa.string() ,)
        UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
        return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )


def lowerCamelCase__ ( ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = BytesIO()
    if image.format in list_image_compression_formats():
        UpperCAmelCase_ : int = image.format
    else:
        UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(_lowercase , format=_lowercase )
    return buffer.getvalue()


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if hasattr(_lowercase , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(_lowercase )}


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    UpperCAmelCase_ : Tuple = array.dtype
    UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    UpperCAmelCase_ : Dict = dtype.kind
    UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
    UpperCAmelCase_ : Optional[Any] = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        UpperCAmelCase_ : Union[str, Any] = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
            UpperCAmelCase_ : str = np.dtype(_lowercase )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )

    UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
    return {"path": None, "bytes": image_to_bytes(_lowercase )}


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
        if isinstance(_lowercase , _lowercase ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(_lowercase , np.ndarray ):
            UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
            return [obj_to_image_dict_func(_lowercase ) for obj in objs]
        elif isinstance(_lowercase , PIL.Image.Image ):
            UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
            return [obj_to_image_dict_func(_lowercase ) for obj in objs]
        else:
            return objs
    else:
        return objs
30
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

__a = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}


class __a( _a ):
    """simple docstring"""
    lowerCAmelCase = '''openai-gpt'''
    lowerCAmelCase = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self ,_SCREAMING_SNAKE_CASE=40_478 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE="cls_index" ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.1 ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
        UpperCAmelCase_ : Dict = vocab_size
        UpperCAmelCase_ : int = n_positions
        UpperCAmelCase_ : Dict = n_embd
        UpperCAmelCase_ : List[str] = n_layer
        UpperCAmelCase_ : Optional[int] = n_head
        UpperCAmelCase_ : List[Any] = afn
        UpperCAmelCase_ : Union[str, Any] = resid_pdrop
        UpperCAmelCase_ : Tuple = embd_pdrop
        UpperCAmelCase_ : Union[str, Any] = attn_pdrop
        UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
        UpperCAmelCase_ : List[Any] = initializer_range
        UpperCAmelCase_ : List[str] = summary_type
        UpperCAmelCase_ : str = summary_use_proj
        UpperCAmelCase_ : List[Any] = summary_activation
        UpperCAmelCase_ : Optional[int] = summary_first_dropout
        UpperCAmelCase_ : List[Any] = summary_proj_to_labels
        super().__init__(**_SCREAMING_SNAKE_CASE )
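# A minimal standalone sketch (names here are illustrative, not from the sample
# above) of the attribute-map idea the config uses: canonical names such as
# hidden_size are aliased onto model-specific ones such as n_embd.
class TinyConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=768):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Fall back to the mapped attribute when an alias is requested.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


cfg = TinyConfig()
assert cfg.hidden_size == cfg.n_embd == 768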
30
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class __a( unittest.TestCase ):
    """simple docstring"""
    @slow
    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(_SCREAMING_SNAKE_CASE )

        from datasets import load_dataset

        UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )

        UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )

        UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : List[Any] = outputs.logits

        UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
        self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )

        UpperCAmelCase_ : int = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
        self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
30
1
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class __a( _a ):
    """simple docstring"""
    @staticmethod
    @abstractmethod
    def a__ ( _SCREAMING_SNAKE_CASE ) -> str:
        raise NotImplementedError()

    @abstractmethod
    def a__ ( self ) -> int:
        raise NotImplementedError()
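# A standalone sketch (hypothetical names, independent of the sample above) of
# the command pattern this abstract base encodes: each subcommand registers
# its own CLI flags and implements run().
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class Command(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        raise NotImplementedError()

    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()


class HelloCommand(Command):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        parser.add_argument("--name", default="world")

    def run(self) -> None:
        print("hello")


HelloCommand().run()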
30
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


__a = logging.get_logger(__name__)


class __a( _a ):
    """simple docstring"""
    def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
        super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
30
1
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


__a = logging.get_logger(__name__)


class __a( _a ):
    """simple docstring"""
    def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
        super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
30
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class __a( unittest.TestCase ):
    """simple docstring"""
    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
        UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
        UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        UpperCAmelCase_ : Dict = parent
        UpperCAmelCase_ : int = batch_size
        UpperCAmelCase_ : int = num_channels
        UpperCAmelCase_ : Any = min_resolution
        UpperCAmelCase_ : Tuple = max_resolution
        UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
        UpperCAmelCase_ : Tuple = size
        UpperCAmelCase_ : List[str] = crop_pct
        UpperCAmelCase_ : List[str] = crop_size
        UpperCAmelCase_ : Any = do_normalize
        UpperCAmelCase_ : str = image_mean
        UpperCAmelCase_ : List[Any] = image_std

    def a__ ( self ) -> str:
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
    """simple docstring"""
    lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None

    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )

    @property
    def a__ ( self ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()

    def a__ ( self ) -> Optional[int]:
        UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
        self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )

    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )

        UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )

    def a__ ( self ) -> Optional[int]:
        pass

    def a__ ( self ) -> Dict:
        # Initialize image_processing
        UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )

        # Test not batched input
        UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

        # Test batched
        UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    def a__ ( self ) -> List[Any]:
        # Initialize image_processing
        UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )

        # Test not batched input
        UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

        # Test batched
        UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    def a__ ( self ) -> Union[str, Any]:
        # Initialize image_processing
        UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

        # Test batched
        UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
30
1
from __future__ import annotations

from math import pow, sqrt


def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(_lowercase , 2 ) + pow(_lowercase , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
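# A minimal standalone sketch of the same series-circuit relation (illustrative
# names, independent of the sample above): |Z|^2 = R^2 + X^2, so any one of the
# three quantities can be recovered from the other two.
from math import sqrt


def impedance_from(resistance: float, reactance: float) -> float:
    # Magnitude of the complex impedance R + jX.
    return sqrt(resistance**2 + reactance**2)


assert impedance_from(3.0, 4.0) == 5.0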
30
import unittest

import numpy as np


def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = np.shape(_lowercase )
    UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase )
    UpperCAmelCase_ : Tuple = np.shape(_lowercase )

    if shape_a[0] != shape_b[0]:
        UpperCAmelCase_ : Tuple = (
            '''Expected the same number of rows for A and B. '''
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(_lowercase )

    if shape_b[1] != shape_c[1]:
        UpperCAmelCase_ : List[Any] = (
            '''Expected the same number of columns for B and C. '''
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(_lowercase )

    UpperCAmelCase_ : Dict = pseudo_inv
    if a_inv is None:
        try:
            UpperCAmelCase_ : Any = np.linalg.inv(_lowercase )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )

    return mat_c - mat_b.T @ a_inv @ mat_b


class __a( unittest.TestCase ):
    """simple docstring"""
    def a__ ( self ) -> None:
        UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )

        UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

        UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )

        UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )

        self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )

    def a__ ( self ) -> None:
        UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )

        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> None:
        UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )

        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
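# A standalone numpy sketch (illustrative names) of the identity the test above
# checks: for M = [[A, B], [B^T, C]] with A invertible, the Schur complement
# S = C - B^T A^{-1} B satisfies det(M) = det(A) * det(S).
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
B = np.array([[1.0], [2.0]])
C = np.array([[5.0]])

S = C - B.T @ np.linalg.inv(A) @ B
M = np.block([[A, B], [B.T, C]])
assert np.isclose(np.linalg.det(M), np.linalg.det(A) * np.linalg.det(S))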
30
1
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    return "".join(sorted(_lowercase ) )


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    return word_by_signature[signature(_lowercase )]


__a = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a = sorted({word.strip().lower() for word in data.splitlines()})

__a = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    __a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
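# A standalone sketch (illustrative names) of the grouping idea above: words
# are anagrams exactly when their sorted letters -- their "signature" -- match.
from collections import defaultdict

words = ["listen", "silent", "enlist", "google", "banana"]
groups = defaultdict(list)
for w in words:
    groups["".join(sorted(w))].append(w)

anagram_sets = [g for g in groups.values() if len(g) > 1]
assert ["listen", "silent", "enlist"] in anagram_sets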
30
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if not isinstance(_lowercase , _lowercase ):
        UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(_lowercase )

    UpperCAmelCase_ : Any = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )

    UpperCAmelCase_ : Any = len(_lowercase ) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(_lowercase ) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(_lowercase ) % 6)
    else:
        UpperCAmelCase_ : int = B''''''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(_lowercase ) , 6 ) ).encode()
        + padding
    )


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
        UpperCAmelCase_ : Tuple = (
            '''argument should be a bytes-like object or ASCII string, '''
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(_lowercase )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(_lowercase , _lowercase ):
        try:
            UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' )
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''' )

    UpperCAmelCase_ : str = encoded_data.count('''=''' )

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        UpperCAmelCase_ : List[Any] = encoded_data[:-padding]

        UpperCAmelCase_ : List[Any] = ''''''.join(
            bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        UpperCAmelCase_ : Tuple = ''''''.join(
            bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )

    UpperCAmelCase_ : str = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(_lowercase ) , 8 )
    ]

    return bytes(_lowercase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
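# Standalone sketch: the 6-bit regrouping implemented above is what the stdlib
# codec does; a quick round-trip with `base64` (no names from the sample above)
# shows the format, including the '=' padding to a 4-character block.
import base64

payload = b"Hello"
encoded = base64.b64encode(payload)  # b'SGVsbG8=' -- one '=' pads the last block
assert base64.b64decode(encoded) == payload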
30
1
from __future__ import annotations


def lowerCamelCase__ ( _lowercase , _lowercase = None , _lowercase = None ):
    '''simple docstring'''
    if start is None:
        UpperCAmelCase_ : List[str] = 0

    if end is None:
        UpperCAmelCase_ : Dict = len(_lowercase ) - 1

    if start >= end:
        return

    UpperCAmelCase_ : Optional[Any] = (start + end) // 2

    slowsort(_lowercase , _lowercase , _lowercase )
    slowsort(_lowercase , mid + 1 , _lowercase )

    if sequence[end] < sequence[mid]:
        UpperCAmelCase_, UpperCAmelCase_ : List[str] = sequence[mid], sequence[end]

    slowsort(_lowercase , _lowercase , end - 1 )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
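# A standalone sketch (illustrative names) of the in-place slowsort recursion:
# sort both halves, float the maximum to the end, then sort all but the end.
def slowsort(seq, start=0, end=None):
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(seq, start, mid)
    slowsort(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[end], seq[mid] = seq[mid], seq[end]
    slowsort(seq, start, end - 1)


data = [5, 2, 4, 1, 3]
slowsort(data)
assert data == [1, 2, 3, 4, 5]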
30
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class __a( unittest.TestCase ):
    """simple docstring"""
    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_ : Tuple = 0

    @slow
    def a__ ( self ) -> Any:
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )

    def a__ ( self ) -> Optional[Any]:
        UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,12 )

    def a__ ( self ) -> Tuple:
        UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,20 )

    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

        # Check that tokenizer_type ≠ model_type
        UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,12 )

    def a__ ( self ) -> Dict:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )

            UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )

            UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    @require_tokenizers
    def a__ ( self ) -> Optional[int]:
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )

            UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )

            UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> int:
        with pytest.raises(_SCREAMING_SNAKE_CASE ):
            AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )

    @require_tokenizers
    def a__ ( self ) -> Optional[Any]:
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )

            if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
            else:
                self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.model_max_length ,512 )

    @require_tokenizers
    def a__ ( self ) -> List[Any]:
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                _SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
                UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )

    def a__ ( self ) -> Optional[Any]:
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
        UpperCAmelCase_ : List[Any] = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )

    @require_tokenizers
    def a__ ( self ) -> Tuple:
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )

    @require_tokenizers
    def a__ ( self ) -> Optional[int]:
        UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : int = '''Hello, world. How are you?'''
        UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
        self.assertEqual('''[UNK]''' ,tokens[0] )

        UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
        self.assertEqual('''[UNK]''' ,tokens[0] )

    @require_tokenizers
    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
        self.assertEqual(tokenizer.model_max_length ,512 )
        self.assertEqual(tokenizer.vocab_size ,30_000 )
        self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
        self.assertEqual(tokenizer.padding_side ,'''right''' )
        self.assertEqual(tokenizer.truncation_side ,'''right''' )

    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )

        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size ,12 )

    def a__ ( self ) -> Optional[Any]:
        UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> str:
        # Check we can load the tokenizer config of an online model.
        UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
        UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )

        # This model does not have a tokenizer_config so we get back an empty dict.
        UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
        self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )

    def a__ ( self ) -> str:
        try:
            AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )

            UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )

                UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
                self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def a__ ( self ) -> int:
        try:
            AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )

            # Can register in two steps
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                _SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
                bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )

                UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
                self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

                UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
                self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def a__ ( self ) -> Optional[int]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )

        UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )

            # Test we can also load the slow version
            UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )

    @require_tokenizers
    def a__ ( self ) -> Optional[int]:
        class __a( _a ):
            """simple docstring"""
            lowerCAmelCase = False

        class __a( _a ):
            """simple docstring"""
            lowerCAmelCase = NewTokenizer
            lowerCAmelCase = False

        try:
            AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
            AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # If remote code is not set, the default is to use local
            UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local one.
            UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub
            UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def a__ ( self ) -> int:
        UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )

            # Test we can also load the slow version
            UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )

    def a__ ( self ) -> Optional[Any]:
        with self.assertRaisesRegex(
            _SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
            UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )

    def a__ ( self ) -> List[Any]:
        with self.assertRaisesRegex(
            _SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )

    def a__ ( self ) -> Any:
        # Make sure we have cached the tokenizer.
        UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        self.assertEqual(counter.get_request_count ,0 )
        self.assertEqual(counter.head_request_count ,1 )
        self.assertEqual(counter.other_request_count ,0 )
30
1
def lowerCamelCase__ ( _lowercase , _lowercase = False ):
    '''simple docstring'''
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            '''Warning: upper bound of deterministic test is exceeded. '''
            '''Pass allow_probable=True to allow probabilistic test. '''
            '''A return value of True indicates a probable prime.''' )
    # array bounds provided by analysis
    UpperCAmelCase_ : Tuple = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]

    UpperCAmelCase_ : Dict = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(_lowercase , 1 ):
        if n < _p:
            # then we have our last prime to check
            UpperCAmelCase_ : Dict = primes[:idx]
            break
    UpperCAmelCase_, UpperCAmelCase_ : Dict = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        UpperCAmelCase_ : Any = False
        for r in range(_lowercase ):
            UpperCAmelCase_ : Optional[int] = pow(_lowercase , d * 2**r , _lowercase )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                UpperCAmelCase_ : int = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def lowerCamelCase__ ( ):
    '''simple docstring'''
    assert not miller_rabin(561 )
    assert miller_rabin(563 )
    # 2047

    assert not miller_rabin(838201 )
    assert miller_rabin(838207 )
    # 1_373_653

    assert not miller_rabin(17316001 )
    assert miller_rabin(17316017 )
    # 25_326_001

    assert not miller_rabin(3078386641 )
    assert miller_rabin(3078386653 )
    # 3_215_031_751

    assert not miller_rabin(1713045574801 )
    assert miller_rabin(1713045574819 )
    # 2_152_302_898_747

    assert not miller_rabin(2779799728307 )
    assert miller_rabin(2779799728327 )
    # 3_474_749_660_383

    assert not miller_rabin(113850023909441 )
    assert miller_rabin(113850023909527 )
    # 341_550_071_728_321

    assert not miller_rabin(1275041018848804351 )
    assert miller_rabin(1275041018848804391 )
    # 3_825_123_056_546_413_051

    assert not miller_rabin(79666464458507787791867 )
    assert miller_rabin(79666464458507787791951 )
    # 318_665_857_834_031_151_167_461

    assert not miller_rabin(552840677446647897660333 )
    assert miller_rabin(552840677446647897660359 )
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
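# A standalone sketch (illustrative names) of one Miller-Rabin round: write
# n - 1 = d * 2**s with d odd, then a witness `a` proves n composite unless
# a**d == 1 (mod n) or a**(d * 2**r) == n - 1 (mod n) for some r < s.
def is_probable_prime(n, witnesses=(2, 3, 5, 7)):
    if n < 2:
        return False
    for p in (2, 3, 5, 7):
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for a in witnesses:
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False
    return True


assert is_probable_prime(563) and not is_probable_prime(561)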
30
from functools import reduce


__a = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def lowerCamelCase__ ( _lowercase = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) )
        for i in range(len(_lowercase ) - 12 ) )


if __name__ == "__main__":
    print(F"""{solution() = }""")
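# A standalone sketch (illustrative names) of the sliding-window idea above:
# take the maximum product over every window of 13 adjacent digits.
from math import prod

digits = "73167176531330624919225119674426574742355349194934"
window = 13
best = max(
    prod(int(d) for d in digits[i : i + window])
    for i in range(len(digits) - window + 1)
)
assert best > 0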
30
1
from torch import nn


class __a( nn.Module ):
    """simple docstring"""
    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
        super().__init__()
        UpperCAmelCase_ : Optional[int] = class_size
        UpperCAmelCase_ : Any = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        UpperCAmelCase_ : str = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        UpperCAmelCase_ : Dict = self.mlp(_SCREAMING_SNAKE_CASE )
        return logits
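# A standalone sketch (illustrative names) of the same single-layer head: one
# linear map from the hidden size down to the number of classes.
import torch
from torch import nn

head = nn.Linear(16, 7)      # embed_size -> class_size
hidden = torch.randn(2, 16)  # a batch of two hidden states
logits = head(hidden)
assert logits.shape == (2, 7)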
30
from decimal import Decimal, getcontext
from math import ceil, factorial


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    if not isinstance(_lowercase , _lowercase ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )

    UpperCAmelCase_ : Tuple = precision
    UpperCAmelCase_ : Optional[Any] = ceil(precision / 14 )
    UpperCAmelCase_ : int = 426880 * Decimal(10005 ).sqrt()
    UpperCAmelCase_ : Tuple = 1
    UpperCAmelCase_ : List[Any] = 13591409
    UpperCAmelCase_ : Optional[Any] = Decimal(_lowercase )
    for k in range(1 , _lowercase ):
        UpperCAmelCase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    __a = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
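# A standalone sketch: each Chudnovsky term contributes roughly 14 digits, so
# the digit budget fixes the iteration count; a quick Decimal comparison
# against math.pi (stdlib only, illustrative) shows the shared prefix.
import math
from decimal import Decimal, getcontext

getcontext().prec = 20
approx = Decimal(math.pi)  # only ~15 digits of this are meaningful
assert str(approx)[:10] == "3.14159265"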
30
1
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

__a = logging.get_logger(__name__)
__a = {'vocab_file': 'spiece.model'}

__a = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

__a = {
    'AI-Sweden/gpt-sw3-126m': 2_048,
    'AI-Sweden/gpt-sw3-350m': 2_048,
    'AI-Sweden/gpt-sw3-1.6b': 2_048,
    'AI-Sweden/gpt-sw3-6.7b': 2_048,
    'AI-Sweden/gpt-sw3-20b': 2_048,
}


class __a( _a ):
    """simple docstring"""
    lowerCAmelCase = VOCAB_FILES_NAMES
    lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase = ['''input_ids''', '''attention_mask''']

    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
        UpperCAmelCase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs

        UpperCAmelCase_ : int = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            UpperCAmelCase_ : Optional[int] = '''None'''

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        UpperCAmelCase_ : Union[str, Any] = '''<|endoftext|>''' if eos_token is None else eos_token
        UpperCAmelCase_ : Optional[int] = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            UpperCAmelCase_ : int = unk_token if pad_token is None else pad_token
            UpperCAmelCase_ : List[Any] = eos_token if bos_token is None else bos_token
        else:
            UpperCAmelCase_ : Optional[Any] = '''<pad>''' if pad_token is None else pad_token
            UpperCAmelCase_ : str = '''<s>''' if bos_token is None else bos_token

        super().__init__(
            do_lower_case=_SCREAMING_SNAKE_CASE ,remove_space=_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE ,bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,sp_model_kwargs=self.sp_model_kwargs ,**_SCREAMING_SNAKE_CASE ,)

        UpperCAmelCase_ : Optional[int] = do_lower_case
        UpperCAmelCase_ : Any = remove_space
        UpperCAmelCase_ : Dict = keep_accents
        UpperCAmelCase_ : Dict = vocab_file

        UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_SCREAMING_SNAKE_CASE )

        # Used for whitespace normalization in input texts
        # fmt : off
        UpperCAmelCase_ : Union[str, Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        UpperCAmelCase_ : Tuple = re.compile(
            f'''[{"".join(map(_SCREAMING_SNAKE_CASE ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8_203] ) )}]''' )

    def __getstate__( self ) -> Optional[int]:
        UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
        UpperCAmelCase_ : int = None
        return state

    def __setstate__( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
        UpperCAmelCase_ : int = d

        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            UpperCAmelCase_ : List[str] = {}

        UpperCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def a__ ( self ) -> int:
        return len(self.sp_model )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
        UpperCAmelCase_ : Optional[Any] = self.non_printing_characters_re.sub('''''' ,_SCREAMING_SNAKE_CASE )

        # Normalize whitespaces
        UpperCAmelCase_ : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )

        # NFC Unicode normalization
        UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFC''' ,_SCREAMING_SNAKE_CASE )
        return text

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
        UpperCAmelCase_ : Optional[Any] = self.preprocess_text(_SCREAMING_SNAKE_CASE )
        return self.sp_model.encode(_SCREAMING_SNAKE_CASE ,out_type=_SCREAMING_SNAKE_CASE )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> int:
        return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
        return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )

    @staticmethod
    def a__ ( _SCREAMING_SNAKE_CASE ) -> str:
        return out_string

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
        UpperCAmelCase_ : Optional[Any] = []
        UpperCAmelCase_ : int = ''''''
        UpperCAmelCase_ : Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
                UpperCAmelCase_ : str = True
                UpperCAmelCase_ : int = []
            else:
                current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : Any = False

        out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
        return out_string

    def a__ ( self ) -> Dict[str, int]:
        UpperCAmelCase_ : int = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase_ : int = os.path.join(
            _SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE )
        elif not os.path.isfile(self.vocab_file ):
            with open(_SCREAMING_SNAKE_CASE ,'''wb''' ) as fi:
                UpperCAmelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(_SCREAMING_SNAKE_CASE )

        return (out_vocab_file,)

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : str = self.preprocess_text(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : Dict = self.sp_model.encode(_SCREAMING_SNAKE_CASE )
        else:
            UpperCAmelCase_ : Any = [self.preprocess_text(_SCREAMING_SNAKE_CASE ) for t in text]
            UpperCAmelCase_ : Optional[int] = self.sp_model.encode(_SCREAMING_SNAKE_CASE )

        if return_tensors is True or return_tensors == "pt":
            UpperCAmelCase_ : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE )

        return token_ids

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
        return self.sp_model.decode(_SCREAMING_SNAKE_CASE )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[int]:
        UpperCAmelCase_ : str = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        UpperCAmelCase_ : Any = (
            f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(_SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
        )
        return self.encode(text=_SCREAMING_SNAKE_CASE )
30
from __future__ import annotations

import math

__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'


def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
        UpperCAmelCase_ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(_lowercase )
    UpperCAmelCase_ : Tuple = ((x * distance) / (z + distance)) * scale
    UpperCAmelCase_ : str = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
    '''simple docstring'''
    if not isinstance(_lowercase , _lowercase ):
        raise TypeError('''Axis must be a str''' )
    UpperCAmelCase_ : Optional[Any] = locals()
    del input_variables["axis"]
    if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ):
        UpperCAmelCase_ : List[Any] = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(_lowercase )
    UpperCAmelCase_ : Dict = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        UpperCAmelCase_ : Optional[int] = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
        UpperCAmelCase_ : List[Any] = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
        UpperCAmelCase_ : Optional[int] = z
    elif axis == "x":
        UpperCAmelCase_ : Any = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
        UpperCAmelCase_ : int = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
        UpperCAmelCase_ : Dict = x
    elif axis == "y":
        UpperCAmelCase_ : Union[str, Any] = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
        UpperCAmelCase_ : Optional[int] = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
        UpperCAmelCase_ : Optional[int] = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
30
1
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def lowerCamelCase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=_lowercase , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=_lowercase , default=5 )
    parser.add_argument('''--batch_size''' , type=_lowercase , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=_lowercase , default=1 )
    parser.add_argument('''--freeze''' , type=_lowercase , default=_lowercase )
    parser.add_argument('''--learning_rate''' , type=_lowercase , default=5E-4 )
    parser.add_argument('''--seed''' , type=_lowercase , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=_lowercase , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=_lowercase , default=10 )
    parser.add_argument('''--weight_decay''' , type=_lowercase , default=0.01 )
    parser.add_argument('''--output_dir''' , type=_lowercase , default='''./results''' )
    return parser.parse_args()


__a = load('accuracy')


def lowerCamelCase__ ( _lowercase ):
    '''simple docstring'''
    UpperCAmelCase_, UpperCAmelCase_ : List[str] = eval_pred
    UpperCAmelCase_ : str = np.argmax(_lowercase , axis=1 )
    return metric.compute(predictions=_lowercase , references=_lowercase )


class __a( _a ):
    """simple docstring"""
    def __init__( self ,_SCREAMING_SNAKE_CASE ) -> None:
        super().__init__()
        UpperCAmelCase_ : List[Any] = trainer

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Any:
        if control.should_evaluate:
            UpperCAmelCase_ : Union[str, Any] = deepcopy(_SCREAMING_SNAKE_CASE )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset ,metric_key_prefix='''train''' )
            return control_copy


def lowerCamelCase__ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : str = get_args()
    set_seed(args.seed )

    UpperCAmelCase_ : Optional[int] = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    UpperCAmelCase_ : Optional[int] = dataset.train_test_split(test_size=0.2 )
    UpperCAmelCase_ : Dict = train_test['''test'''].train_test_split(test_size=0.5 )
    UpperCAmelCase_ : Tuple = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )

    print('''Loading tokenizer and model''' )
    UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(args.model_ckpt )
    UpperCAmelCase_ : str = tokenizer.eos_token
    UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    UpperCAmelCase_ : str = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            UpperCAmelCase_ : Optional[int] = False

    UpperCAmelCase_ : List[Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )

    def tokenize(_lowercase ):
        UpperCAmelCase_ : Dict = tokenizer(example['''src'''] , truncation=_lowercase , max_length=1024 )
        UpperCAmelCase_ : Tuple = labels.straint(example['''complexity'''] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    UpperCAmelCase_ : Dict = train_test_validation.map(
        _lowercase , batched=_lowercase , remove_columns=train_test_validation['''train'''].column_names , )

    UpperCAmelCase_ : List[Any] = DataCollatorWithPadding(tokenizer=_lowercase )

    UpperCAmelCase_ : int = TrainingArguments(
        output_dir=args.output_dir ,
        learning_rate=args.learning_rate ,
        lr_scheduler_type=args.lr_scheduler_type ,
        evaluation_strategy='''epoch''' ,
        save_strategy='''epoch''' ,
        logging_strategy='''epoch''' ,
        per_device_train_batch_size=args.batch_size ,
        per_device_eval_batch_size=args.batch_size ,
        num_train_epochs=args.num_epochs ,
        gradient_accumulation_steps=args.gradient_accumulation_steps ,
        weight_decay=0.01 ,
        metric_for_best_model='''accuracy''' ,
        run_name='''complexity-java''' ,
        report_to='''wandb''' ,
    )

    UpperCAmelCase_ : int = Trainer(
        model=_lowercase ,
        args=_lowercase ,
        train_dataset=tokenized_datasets['''train'''] ,
        eval_dataset=tokenized_datasets['''valid'''] ,
        tokenizer=_lowercase ,
        data_collator=_lowercase ,
        compute_metrics=_lowercase ,
    )

    print('''Training...''' )

    trainer.add_callback(CustomCallback(_lowercase ) )

    trainer.train()


if __name__ == "__main__":
    main()
30
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
30
1
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disable gradient updates for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about MPS quirks."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current wall-clock time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
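# A quick usage sketch for the helpers above. The torchvision import and the
# resnet18 model are illustrative assumptions, not part of this file.
import torch
from torchvision.models import resnet18

model = resnet18()
freeze_module(model)  # every parameter now has requires_grad == False
assert all(not p.requires_grad for p in model.parameters())
model = model.to(get_device())  # picks "cuda", "mps", or "cpu"
print(f"[{get_timestamp()}] model ready")  # e.g. "[14:03:21] model ready"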
30
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a, m):
    """Return the modular multiplicative inverse of `a` modulo `m`,
    computed with the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
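# Worked example for find_mod_inverse above: modulo 26 (as in affine ciphers),
# the inverse of 7 is 15 because 7 * 15 = 105 = 4 * 26 + 1.
assert gcd(7, 26) == 1
inv = find_mod_inverse(7, 26)
assert inv == 15 and (7 * inv) % 26 == 1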
30
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # Threshold the logits into a binary mask, then render it as an image.
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
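# Hypothetical usage sketch for the tool above (the image path and label are
# placeholders; instantiating the tool downloads the CLIPSeg checkpoint).
from PIL import Image

segmenter = ImageSegmentationTool()
image = Image.open("photo.png")             # any RGB image
mask = segmenter(image=image, label="cat")  # returns a black/white PIL image
mask.save("photo_mask.png")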
30
30
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
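# Sketch: aligning the template with a concrete dataset schema (the column
# values are illustrative). `align_with_features` swaps the generic ClassLabel
# for the dataset's own label feature so the template carries real class names.
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification().align_with_features(features)
print(template.column_mapping)  # {'image': 'image', 'labels': 'labels'}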
30
import numpy as np

import datasets


_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
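# The metric computes the squared Mahalanobis distance per row of X:
# d^2(x) = (x - mu)^T Sigma^{-1} (x - mu), with a pseudo-inverse fallback when
# the covariance is singular. Mirroring the docstring example:
metric = Mahalanobis()
print(metric._compute(X=[[0, 1]], reference_distribution=[[0, 1], [1, 0]]))
# {'mahalanobis': array([0.5])}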
30
1
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
30
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __a = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __a = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 
'start_index', 'end_index', 'text'] ) __a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_a ) class __a: """simple docstring""" def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( _SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) elif titles is None or texts is None: UpperCAmelCase_ : List[str] = titles if texts is None else texts return super().__call__( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles] UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts] UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' ) UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : Optional[int] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: UpperCAmelCase_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : Dict = attention_mask 
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = reader_input['''input_ids'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = [] for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) UpperCAmelCase_ : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_a ) class __a( _a , _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = ['''input_ids''', '''attention_mask''']
30
1
30
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
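# Instantiation sketch: GLPNConfig behaves like any PretrainedConfig, so
# overriding individual hyperparameters is just keyword arguments (the values
# below are illustrative, not tuned).
config = GLPNConfig(decoder_hidden_size=128, drop_path_rate=0.2)
print(config.model_type, config.decoder_hidden_size)  # glpn 128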
30
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __a( _a ): """simple docstring""" lowerCAmelCase = '''wav2vec2''' def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : List[Any] = feat_extract_activation UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = conv_bias UpperCAmelCase_ : str = num_conv_pos_embeddings UpperCAmelCase_ : Any = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = hidden_dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : List[str] = feat_proj_dropout UpperCAmelCase_ : int = final_dropout UpperCAmelCase_ : Union[str, Any] = layerdrop UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Optional[int] = do_stable_layer_norm UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : Optional[Any] = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : str = mask_feature_length UpperCAmelCase_ : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group UpperCAmelCase_ : Any = num_codevector_groups UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature UpperCAmelCase_ : List[str] = feat_quantizer_dropout UpperCAmelCase_ : Dict = num_negatives UpperCAmelCase_ : List[str] = codevector_dim UpperCAmelCase_ : List[str] = proj_codevector_dim UpperCAmelCase_ : str = diversity_loss_weight # ctc loss UpperCAmelCase_ : List[Any] = ctc_loss_reduction UpperCAmelCase_ : List[str] = ctc_zero_infinity # adapter UpperCAmelCase_ : Optional[Any] = add_adapter UpperCAmelCase_ : Any = adapter_kernel_size UpperCAmelCase_ : Optional[int] = adapter_stride UpperCAmelCase_ : List[Any] = num_adapter_layers UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size UpperCAmelCase_ : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = xvector_output_dim @property def a__ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
30
1
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = s.rsplit(_lowercase , _lowercase ) return new.join(_lowercase ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = {} UpperCAmelCase_ : Any = ['''group_1''', '''group_2''', '''group_3''', '''group_4'''] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: UpperCAmelCase_ : List[str] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' ) if "res_path" in key: UpperCAmelCase_ : Tuple = key.replace('''res_path.''' , '''res_path.path.''' ) if key.endswith('''.w''' ): UpperCAmelCase_ : Tuple = rreplace(_lowercase , '''.w''' , '''.weight''' , 1 ) if key.endswith('''.b''' ): UpperCAmelCase_ : int = rreplace(_lowercase , '''.b''' , '''.bias''' , 1 ) UpperCAmelCase_ : str = value.float() return upgrade @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , _lowercase=True ): '''simple docstring''' from dall_e import Encoder UpperCAmelCase_ : Dict = Encoder() if os.path.exists(_lowercase ): UpperCAmelCase_ : List[str] = torch.load(_lowercase ) else: UpperCAmelCase_ : Dict = torch.hub.load_state_dict_from_url(_lowercase ) if isinstance(_lowercase , _lowercase ): UpperCAmelCase_ : Optional[int] = ckpt.state_dict() encoder.load_state_dict(_lowercase ) if config_path is not None: UpperCAmelCase_ : Dict = FlavaImageCodebookConfig.from_pretrained(_lowercase ) else: UpperCAmelCase_ : str = FlavaImageCodebookConfig() UpperCAmelCase_ : Optional[Any] = FlavaImageCodebook(_lowercase ).eval() UpperCAmelCase_ : Optional[int] = encoder.state_dict() UpperCAmelCase_ : Tuple = upgrade_state_dict(_lowercase ) hf_model.load_state_dict(_lowercase ) UpperCAmelCase_ : List[str] = hf_model.state_dict() UpperCAmelCase_ : List[Any] = count_parameters(_lowercase ) UpperCAmelCase_ : List[str] = count_parameters(_lowercase ) assert torch.allclose(_lowercase , _lowercase , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(_lowercase ) else: return hf_state_dict if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
30
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
1
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
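# Invocation sketch for the converter above (the script name is assumed from
# the function it defines, and all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/out/pytorch_model.bin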
30
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
30
1
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __a = datasets.logging.get_logger(__name__) __a = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __a = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __a = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __a = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/google-research/bleurt''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/google-research/bleurt'''] ,reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( '''Using default BLEURT-Base checkpoint for sequence maximum length 128. 
''' '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' ) UpperCAmelCase_ : Tuple = '''bleurt-base-128''' if self.config_name.lower() in CHECKPOINT_URLS: UpperCAmelCase_ : Optional[int] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: UpperCAmelCase_ : Tuple = self.config_name.upper() else: raise KeyError( f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer UpperCAmelCase_ : List[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) UpperCAmelCase_ : Any = score.BleurtScorer(os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: UpperCAmelCase_ : Optional[Any] = self.scorer.score(references=_SCREAMING_SNAKE_CASE ,candidates=_SCREAMING_SNAKE_CASE ) return {"scores": scores}
30
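A minimal usage sketch for the BLEURT wrapper above, mirroring its own docstring example; it assumes the `bleurt` pip package is installed and that the named checkpoint can be downloaded:

import datasets

predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]

# the config name selects an entry from CHECKPOINT_URLS; "bleurt-tiny-128" keeps the download small
bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
results = bleurt.compute(predictions=predictions, references=references)
print(results["scores"])  # one float per prediction/reference pair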
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
30
1
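A short sketch of how the attribute map in the config sample above behaves, assuming `transformers` ships ImageGPTConfig as shown:

from transformers import ImageGPTConfig

config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
# attribute_map lets model-agnostic code read the canonical names:
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 512 24 8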
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class __a: """simple docstring""" def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: return None class __a: """simple docstring""" def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: return None class __a( unittest.TestCase ): """simple docstring""" lowerCAmelCase = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a__ ( self ) -> Any: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(_SCREAMING_SNAKE_CASE ,'''tf''' ,12 ,**_SCREAMING_SNAKE_CASE ) @require_torch @slow def a__ ( self ) -> str: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(_SCREAMING_SNAKE_CASE ,'''pt''' ,12 ,**_SCREAMING_SNAKE_CASE ) @require_torch @slow def a__ ( self ) -> int: from transformers import BertModel UpperCAmelCase_ : Optional[int] = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) ) vocab_file.flush() UpperCAmelCase_ : int = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: UpperCAmelCase_ : List[Any] = BertModel(BertConfig(vocab_size=len(_SCREAMING_SNAKE_CASE ) ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) self._test_export(_SCREAMING_SNAKE_CASE ,'''pt''' ,12 ,_SCREAMING_SNAKE_CASE ) @require_tf @slow def a__ ( self ) -> Optional[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: UpperCAmelCase_ : Optional[int] = self._test_export(_SCREAMING_SNAKE_CASE ,'''tf''' ,12 ,**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = quantize(Path(_SCREAMING_SNAKE_CASE ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(_SCREAMING_SNAKE_CASE ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def a__ ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: UpperCAmelCase_ : List[Any] = self._test_export(_SCREAMING_SNAKE_CASE ,'''pt''' ,12 ,**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = quantize(_SCREAMING_SNAKE_CASE ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(_SCREAMING_SNAKE_CASE ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ) -> Dict: try: # Compute path with TemporaryDirectory() as tempdir: UpperCAmelCase_ : Tuple = Path(_SCREAMING_SNAKE_CASE ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) return path except 
Exception as e: self.fail(_SCREAMING_SNAKE_CASE ) @require_torch @require_tokenizers @slow def a__ ( self ) -> Optional[int]: from transformers import BertModel UpperCAmelCase_ : str = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,'''pt''' ) @require_tf @require_tokenizers @slow def a__ ( self ) -> str: from transformers import TFBertModel UpperCAmelCase_ : List[str] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,'''tf''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Dict = FeatureExtractionPipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = infer_shapes(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Assert all variables are present self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,_SCREAMING_SNAKE_CASE ) self.assertSequenceEqual(variable_names[3:] ,_SCREAMING_SNAKE_CASE ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] ,{0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] ,{0: '''batch'''} ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] UpperCAmelCase_ : Dict = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} UpperCAmelCase_, UpperCAmelCase_ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,3 ) # Should have exactly the same input names self.assertEqual(set(_SCREAMING_SNAKE_CASE ) ,set(_SCREAMING_SNAKE_CASE ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(_SCREAMING_SNAKE_CASE ,(tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = ensure_valid_input(FuncNonContiguousArgs() ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] ,'''input_ids''' ) def a__ ( self ) -> str: UpperCAmelCase_ : Union[str, Any] = 
generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) ,'''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' ,generated.as_posix() )
30
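The test above expects `generate_identified_filename` to splice a suffix in before the file extension; a hedged re-implementation of that behaviour with nothing but pathlib:

from pathlib import Path

def add_identifier(path: Path, identifier: str) -> Path:
    # "my_fake_model.onnx" + "-test" -> "my_fake_model-test.onnx"
    return path.parent.joinpath(path.stem + identifier).with_suffix(path.suffix)

generated = add_identifier(Path("/home/something/my_fake_model.onnx"), "-test")
assert generated.as_posix() == "/home/something/my_fake_model-test.onnx"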
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __a = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __a = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if bloom_config_file == "": UpperCAmelCase_ : Tuple = BloomConfig() else: UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase ) if shard_model: UpperCAmelCase_ : Any = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}} UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = BloomConfig() for j, file in enumerate(_lowercase ): print('''Processing file: {}'''.format(_lowercase ) ) UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : Dict = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in 
WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp torch.save( _lowercase , os.path.join( _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCAmelCase_ : Union[str, Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) UpperCAmelCase_ : List[Any] = BloomConfig() UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME UpperCAmelCase_ : List[str] = total_size with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) else: UpperCAmelCase_ : Any = BloomModel(_lowercase ) UpperCAmelCase_ : Tuple = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = None for i, file in enumerate(_lowercase ): UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : str = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Dict = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : int = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys ) else: UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(_lowercase , exist_ok=_lowercase ) UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __a = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
30
1
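The conversion script above applies two different merge rules across tensor-parallel (TP) shards; a toy sketch of both, assuming only that torch is available:

import torch

tp_shards = [torch.ones(2, 2), 3 * torch.ones(2, 2)]  # hypothetical weights from two TP ranks

averaged = sum(tp_shards) / len(tp_shards)  # rule for WEIGHTS_TO_AVERAGE_ENDSWITH (e.g. layer norms)
row_parallel = torch.cat(tp_shards, dim=1)  # rule for row-parallel weights (cat along dim 1)
col_parallel = torch.cat(tp_shards, dim=0)  # rule for column-parallel weights (cat along dim 0)

print(averaged[0, 0].item(), row_parallel.shape, col_parallel.shape)  # 2.0 torch.Size([2, 4]) torch.Size([4, 2])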
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __a = [ 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the' ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe' ' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.', 'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal' ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s' ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the' ' body.', 'Amnesty International releases its annual report on the death penalty. The report catalogs the use of' ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the' ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital' ' punishment.', ] __a = [ 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .' ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz' ' had informed his Lufthansa training school of an episode of severe depression, airline says .', 'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .' ' Israel and the United States opposed the move, which could open the door to war crimes investigations against' ' Israelis .', 'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to' ' death . Organization claims that governments around the world are using the threat of terrorism to advance' ' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death' ' sentences up by 28% .', ] def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[Any] = calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowercase , _lowercase ) UpperCAmelCase_ : Any = calculate_rouge(_lowercase , _lowercase , bootstrap_aggregation=_lowercase , rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = '''rougeLsum''' UpperCAmelCase_ : int = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k] UpperCAmelCase_ : List[Any] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Dict = ['''rouge1''', '''rouge2''', '''rougeL'''] UpperCAmelCase_ : List[Any] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase ) UpperCAmelCase_ : List[str] = calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase , rouge_keys=_lowercase ) assert score_sep == score_no_sep def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : str = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] UpperCAmelCase_ : Optional[Any] = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase ) == calculate_rouge(_lowercase , _lowercase , newline_sep=_lowercase ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] UpperCAmelCase_ : Tuple = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] UpperCAmelCase_ : Optional[Any] = calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] , newline_sep=_lowercase )['''rougeLsum'''] UpperCAmelCase_ : Tuple = calculate_rouge(_lowercase , _lowercase , rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[str] = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) UpperCAmelCase_ : Optional[int] = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowercase , _lowercase ) UpperCAmelCase_ : Tuple = calculate_rouge_path( data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=_lowercase ) assert isinstance(_lowercase , _lowercase )
30
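The `newline_sep` behaviour exercised above matters because rougeLsum treats newlines as sentence boundaries; a small illustration with the underlying `rouge_score` package (an assumption: the local `calculate_rouge` helper wraps it):

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
target = "Margot Frank died in 1945.\nA month earlier than thought."
prediction = "Margot Frank died in 1945. A month earlier than thought."
# identical words, but without a newline the prediction's LCS is computed over one long "sentence"
print(scorer.score(target, prediction)["rougeLsum"].fmeasure)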
def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 for i in range(1 , 1001 ): total += i**i return str(_lowercase )[-10:] if __name__ == "__main__": print(solution())
30
1
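The self-powers snippet above builds enormous integers before slicing off the last digits; the same ten digits fall out of modular exponentiation, a sketch:

MOD = 10**10  # keep only the last ten decimal digits at every step

total = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(total).zfill(10))  # matches str(sum(i**i))[-10:] without the huge intermediate sum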
import math import random def lowerCamelCase__ ( _lowercase , _lowercase = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __a = 0.02 def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(_lowercase ): # Forward propagation UpperCAmelCase_ : List[str] = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? UpperCAmelCase_ : Tuple = (expected / 100) - layer_a # Error delta UpperCAmelCase_ : int = layer_1_error * sigmoid_function(_lowercase , _lowercase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __a = int(input('Expected value: ')) __a = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
30
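Because the sample above has had its identifiers scrubbed, here is a de-obfuscated sketch of the same single-weight loop, assuming the original convention that `deriv=True` receives the sigmoid's *output*:

import math

def sigmoid(value, deriv=False):
    if deriv:
        return value * (1 - value)  # derivative written in terms of the activation itself
    return 1 / (1 + math.exp(-value))

INITIAL_VALUE = 0.02
weight, expected = 1.0, 50
for _ in range(100_000):  # fixed count here; the sample reads it from input()
    layer_1 = sigmoid(INITIAL_VALUE * weight)           # forward propagation
    layer_1_error = (expected / 100) - layer_1          # how much did we miss?
    layer_1_delta = layer_1_error * sigmoid(layer_1, deriv=True)
    weight += INITIAL_VALUE * layer_1_delta             # nudge the weight along the slope
print(round(layer_1 * 100, 1))  # approaches `expected`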
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __a = None __a = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __a = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class __a: """simple docstring""" lowerCAmelCase = True lowerCAmelCase = None # Automatically constructed lowerCAmelCase = "PIL.Image.Image" lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a ) def __call__( self ) -> Tuple: return self.pa_type def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": value, "bytes": None} elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": None, "bytes": value} elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_SCREAMING_SNAKE_CASE ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: UpperCAmelCase_ : Dict = {} UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Dict = path.split('''::''' )[-1] try: UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id'''] UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE ) except ValueError: UpperCAmelCase_ : Optional[Any] = None with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f: UpperCAmelCase_ : List[str] = BytesIO(f.read() ) UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: if pa.types.is_string(storage.type ): UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: UpperCAmelCase_ : Dict = storage.field('''bytes''' ) else: UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: UpperCAmelCase_ : int = storage.field('''path''' ) else: UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_list(storage.type ): UpperCAmelCase_ : Optional[Any] = pa.array( [encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,) UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays( [bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_SCREAMING_SNAKE_CASE ): with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f: UpperCAmelCase_ : Any = f.read() return bytes_ UpperCAmelCase_ : Union[str, Any] = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) UpperCAmelCase_ : List[str] = 
pa.array( [os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,) UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def lowerCamelCase__ ( ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = BytesIO() if image.format in list_image_compression_formats(): UpperCAmelCase_ : int = image.format else: UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(_lowercase , format=_lowercase ) return buffer.getvalue() def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if hasattr(_lowercase , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) UpperCAmelCase_ : Tuple = array.dtype UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER UpperCAmelCase_ : Dict = dtype.kind UpperCAmelCase_ : Union[str, Any] = dtype.itemsize UpperCAmelCase_ : Optional[Any] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCAmelCase_ : Tuple = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCAmelCase_ : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase ) UpperCAmelCase_ : str = np.dtype(_lowercase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) ) return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase ) if isinstance(_lowercase , _lowercase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_lowercase , np.ndarray ): UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] elif isinstance(_lowercase , PIL.Image.Image ): UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] else: return objs else: return objs
30
1
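A minimal sketch of the array-to-bytes path implemented above, assuming Pillow and numpy are installed; note that multi-channel arrays must be uint8, exactly as the encoder enforces:

from io import BytesIO

import numpy as np
import PIL.Image

array = np.zeros((4, 4, 3), dtype=np.uint8)
image = PIL.Image.fromarray(array)      # RGB mode for an HxWx3 uint8 array

buffer = BytesIO()
image.save(buffer, format="PNG")        # PNG for the standard modes, TIFF otherwise
encoded = {"path": None, "bytes": buffer.getvalue()}
print(image.mode, len(encoded["bytes"]))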
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __a = 'scheduler_config.json' class __a( _a ): """simple docstring""" lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = 5 lowerCAmelCase = 6 lowerCAmelCase = 7 lowerCAmelCase = 8 lowerCAmelCase = 9 lowerCAmelCase = 10 lowerCAmelCase = 11 lowerCAmelCase = 12 lowerCAmelCase = 13 lowerCAmelCase = 14 @dataclass class __a( _a ): """simple docstring""" lowerCAmelCase = 42 class __a: """simple docstring""" lowerCAmelCase = SCHEDULER_CONFIG_NAME lowerCAmelCase = [] lowerCAmelCase = True @classmethod def a__ ( cls ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> List[str]: UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = cls.load_config( pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE ,subfolder=_SCREAMING_SNAKE_CASE ,return_unused_kwargs=_SCREAMING_SNAKE_CASE ,return_commit_hash=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) return cls.from_config(_SCREAMING_SNAKE_CASE ,return_unused_kwargs=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,**_SCREAMING_SNAKE_CASE ) -> Dict: self.save_config(save_directory=_SCREAMING_SNAKE_CASE ,push_to_hub=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) @property def a__ ( self ) -> Tuple: return self._get_compatibles() @classmethod def a__ ( cls ) -> Optional[Any]: UpperCAmelCase_ : int = list(set([cls.__name__] + cls._compatibles ) ) UpperCAmelCase_ : int = importlib.import_module(__name__.split('''.''' )[0] ) UpperCAmelCase_ : Any = [ getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] return compatible_classes
30
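A hedged usage sketch for the scheduler base class above, assuming it backs `diffusers` schedulers such as DDPMScheduler and that the hub checkpoint is reachable:

from diffusers import DDPMScheduler

# from_pretrained reads scheduler_config.json (SCHEDULER_CONFIG_NAME above)
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
print([cls.__name__ for cls in scheduler.compatibles])  # schedulers sharing this config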
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> List[str]: UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(_SCREAMING_SNAKE_CASE ) from datasets import load_dataset UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' ) UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' ) UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = outputs.logits UpperCAmelCase_ : Tuple = torch.Size((1, 16) ) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,) self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
30
1
from ..utils import DummyObject, requires_backends class __a( metaclass=_a ): """simple docstring""" lowerCAmelCase = ['''flax''', '''transformers'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: requires_backends(self ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict: requires_backends(cls ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict: requires_backends(cls ,['''flax''', '''transformers'''] ) class __a( metaclass=_a ): """simple docstring""" lowerCAmelCase = ['''flax''', '''transformers'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]: requires_backends(self ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> str: requires_backends(cls ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]: requires_backends(cls ,['''flax''', '''transformers'''] ) class __a( metaclass=_a ): """simple docstring""" lowerCAmelCase = ['''flax''', '''transformers'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: requires_backends(self ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]: requires_backends(cls ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]: requires_backends(cls ,['''flax''', '''transformers'''] ) class __a( metaclass=_a ): """simple docstring""" lowerCAmelCase = ['''flax''', '''transformers'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Any: requires_backends(self ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]: requires_backends(cls ,['''flax''', '''transformers'''] ) @classmethod def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]: requires_backends(cls ,['''flax''', '''transformers'''] )
30
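A simplified sketch of the dummy-object trick above (not the library's exact metaclass): any use of the class fails fast with the missing-backend message instead of an opaque import error elsewhere:

class DummyObject(type):
    def __getattr__(cls, name):  # fires for *class*-level attribute access
        raise ImportError(f"{cls.__name__} requires the backends: flax, transformers")

class FlaxSomePipeline(metaclass=DummyObject):  # hypothetical stand-in class
    pass

try:
    FlaxSomePipeline.from_pretrained("some/checkpoint")
except ImportError as err:
    print(err)  # FlaxSomePipeline requires the backends: flax, transformers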
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __a = logging.get_logger(__name__) class __a( _a ): """simple docstring""" def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None: warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
30
1
from __future__ import annotations def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' if b == 0: return (1, 0) ((UpperCAmelCase_), (UpperCAmelCase_)) : Union[str, Any] = extended_euclid(_lowercase , a % b ) UpperCAmelCase_ : Dict = a // b return (y, x - k * y) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' ((UpperCAmelCase_), (UpperCAmelCase_)) : Any = extended_euclid(_lowercase , _lowercase ) UpperCAmelCase_ : List[str] = na * na UpperCAmelCase_ : Union[str, Any] = ra * x * na + ra * y * na return (n % m + m) % m def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' ((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[int] = extended_euclid(_lowercase , _lowercase ) if b < 0: UpperCAmelCase_ : int = (b % n + n) % n return b def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_, UpperCAmelCase_ : List[Any] = invert_modulo(_lowercase , _lowercase ), invert_modulo(_lowercase , _lowercase ) UpperCAmelCase_ : Dict = na * na UpperCAmelCase_ : Optional[Any] = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='chinese_remainder_theorem', verbose=True) testmod(name='chinese_remainder_theorem2', verbose=True) testmod(name='invert_modulo', verbose=True) testmod(name='extended_euclid', verbose=True)
30
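The Chinese-remainder sample above is easier to follow with names restored; a de-obfuscated sketch under the usual convention (find n with n ≡ r1 mod m1 and n ≡ r2 mod m2, for coprime moduli):

def extended_euclid(a, b):
    # returns (x, y) with a*x + b*y == gcd(a, b)
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    return (y, x - (a // b) * y)

def chinese_remainder_theorem(r1, m1, r2, m2):
    (x, y) = extended_euclid(m1, m2)  # x*m1 + y*m2 == 1 for coprime m1, m2
    n = r2 * x * m1 + r1 * y * m2     # ≡ r1 (mod m1) and ≡ r2 (mod m2)
    m = m1 * m2
    return (n % m + m) % m

print(chinese_remainder_theorem(1, 5, 4, 9))  # 31, since 31 % 5 == 1 and 31 % 9 == 4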
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]: UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30} UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Any = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : List[str] = crop_pct UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Any = do_normalize UpperCAmelCase_ : str = image_mean UpperCAmelCase_ : List[Any] = image_std def a__ ( self ) -> str: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def a__ ( self ) -> Dict: UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self ) @property def a__ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Dict: # Initialize image_processing UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> List[Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
30
1
from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ): raise TypeError('''Undefined for non-integers''' ) elif precision < 1: raise ValueError('''Undefined for non-natural numbers''' ) UpperCAmelCase_ : Tuple = precision UpperCAmelCase_ : Optional[Any] = ceil(precision / 14 ) UpperCAmelCase_ : int = 426880 * Decimal(10005 ).sqrt() UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : List[Any] = 13591409 UpperCAmelCase_ : Optional[Any] = Decimal(_lowercase ) for k in range(1 , _lowercase ): UpperCAmelCase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": __a = 50 print(F"""The first {n} digits of pi is: {pi(n)}""")
30
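A note on the `ceil(precision / 14)` term count above: successive Chudnovsky terms shrink by a constant factor, so each iteration contributes a fixed number of digits. A small sketch of that constant:

import math

# the exponential term grows by 262537412640768000 per k while the multinomial
# term grows by roughly 1728 per k, so each term is ~1.5e14 times smaller
digits_per_term = math.log10(262537412640768000 / 1728)
print(round(digits_per_term, 2))  # ≈ 14.18, hence ceil(precision / 14) terms suffice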
import unittest import numpy as np def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ): '''simple docstring''' UpperCAmelCase_ : Dict = np.shape(_lowercase ) UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase ) UpperCAmelCase_ : Tuple = np.shape(_lowercase ) if shape_a[0] != shape_b[0]: UpperCAmelCase_ : Tuple = ( '''Expected the same number of rows for A and B. ''' f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(_lowercase ) if shape_b[1] != shape_c[1]: UpperCAmelCase_ : List[Any] = ( '''Expected the same number of columns for B and C. ''' f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(_lowercase ) UpperCAmelCase_ : Dict = pseudo_inv if a_inv is None: try: UpperCAmelCase_ : Any = np.linalg.inv(_lowercase ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] ) UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s ) def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> None: UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
1
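The determinant identity the tests above rely on, checked numerically on a tiny example (a sketch; numpy only):

import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])   # must be invertible
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])

s = c - b.T @ np.linalg.inv(a) @ b       # the Schur complement of A
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
print(np.linalg.det(m))  # 40.0 == det(A) * det(S)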
class __a: """simple docstring""" def __init__( self ) -> Any: UpperCAmelCase_ : Tuple = {} def a__ ( self ) -> None: print(self.vertex ) for i in self.vertex: print(_SCREAMING_SNAKE_CASE ,''' -> ''' ,''' -> '''.join([str(_SCREAMING_SNAKE_CASE ) for j in self.vertex[i]] ) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(_SCREAMING_SNAKE_CASE ) else: # else make a new vertex UpperCAmelCase_ : Dict = [to_vertex] def a__ ( self ) -> None: # visited array for storing already visited nodes UpperCAmelCase_ : List[str] = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None: # mark start vertex as visited UpperCAmelCase_ : List[Any] = True print(_SCREAMING_SNAKE_CASE ,end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __a = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('DFS:') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
30
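The recursive traversal above can also be done with an explicit stack, which avoids recursion limits on deep graphs; a sketch over the same adjacency-dict representation:

def dfs_iterative(adjacency, start):
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()                 # LIFO order gives depth-first traversal
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so they are visited in insertion order
        stack.extend(reversed(adjacency.get(node, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]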
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ): UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_lowercase ) UpperCAmelCase_ : Any = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data ) UpperCAmelCase_ : Any = len(_lowercase ) % 6 != 0 if padding_needed: # The padding that will be added later UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(_lowercase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_lowercase ) % 6) else: UpperCAmelCase_ : int = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_lowercase ) , 6 ) ).encode() + padding ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ): UpperCAmelCase_ : Tuple = ( '''argument should be a bytes-like object or ASCII string, ''' f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_lowercase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_lowercase , _lowercase ): try: UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) UpperCAmelCase_ : str = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one UpperCAmelCase_ : List[Any] = encoded_data[:-padding] UpperCAmelCase_ : List[Any] = ''''''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: UpperCAmelCase_ : Tuple = ''''''.join( bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data ) UpperCAmelCase_ : str = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_lowercase ) , 8 ) ] return bytes(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
30
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 0 @slow def a__ ( self ) -> Any: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,20 ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Check that tokenizer_type ≠ model_type UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) 
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: with pytest.raises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' ) @require_tokenizers def a__ ( self ) -> Optional[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) else: self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) @require_tokenizers def a__ ( self ) -> List[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,): UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def a__ ( self ) -> Optional[Any]: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai UpperCAmelCase_ : int = TOKENIZER_MAPPING.values() UpperCAmelCase_ : List[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Tuple: self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = '''Hello, world. 
How are you?''' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) @require_tokenizers def a__ ( self ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) self.assertEqual(tokenizer.vocab_size ,30_000 ) self.assertEqual(tokenizer.unk_token ,'''[UNK]''' ) self.assertEqual(tokenizer.padding_side ,'''right''' ) self.assertEqual(tokenizer.truncation_side ,'''right''' ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size ,12 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: # Check we can load the tokenizer config of an online model. UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' ) def a__ ( self ) -> str: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def a__ ( self ) -> int: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) # Can register in two steps AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> Optional[int]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) @require_tokenizers def a__ ( self ) -> Optional[int]: class __a( _a ): """simple docstring""" lowerCAmelCase = False class __a( _a ): """simple docstring""" lowerCAmelCase = NewTokenizer lowerCAmelCase = False try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> int: UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) def a__ ( self ) -> Optional[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' ) def a__ ( self ) -> Any: # Make sure we have cached the tokenizer. UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count ,0 ) self.assertEqual(counter.head_request_count ,1 ) self.assertEqual(counter.other_request_count ,0 )
30
1
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __a = logging.getLogger(__name__) __a = tf.data.AUTOTUNE def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' ) parser.add_argument( '''--pretrained_model_config''' , type=_lowercase , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , ) parser.add_argument( '''--tokenizer''' , type=_lowercase , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , ) parser.add_argument( '''--per_replica_batch_size''' , type=_lowercase , default=8 , help='''Batch size per TPU core.''' , ) parser.add_argument( '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , ) parser.add_argument( '''--tpu_name''' , type=_lowercase , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , ) parser.add_argument( '''--tpu_zone''' , type=_lowercase , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , ) parser.add_argument( '''--gcp_project''' , type=_lowercase , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' ) parser.add_argument( '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , ) parser.add_argument( '''--train_dataset''' , type=_lowercase , help='''Path to training dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--shuffle_buffer_size''' , type=_lowercase , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , ) parser.add_argument( '''--eval_dataset''' , type=_lowercase , help='''Path to evaluation dataset to load. If the path begins with `gs://`''' ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , ) parser.add_argument( '''--num_epochs''' , type=_lowercase , default=1 , help='''Number of epochs to train for.''' , ) parser.add_argument( '''--learning_rate''' , type=_lowercase , default=1E-4 , help='''Learning rate to use for training.''' , ) parser.add_argument( '''--weight_decay_rate''' , type=_lowercase , default=1E-3 , help='''Weight decay rate to use for training.''' , ) parser.add_argument( '''--max_length''' , type=_lowercase , default=512 , help='''Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py''' , ) parser.add_argument( '''--mlm_probability''' , type=_lowercase , default=0.15 , help='''Fraction of tokens to mask during training.''' , ) parser.add_argument('''--output_dir''' , type=_lowercase , required=_lowercase , help='''Path to save model checkpoints to.''' ) parser.add_argument('''--hub_model_id''' , type=_lowercase , help='''Model ID to upload to on the Hugging Face Hub.''' ) UpperCAmelCase_ : Optional[int] = parser.parse_args() return args def lowerCamelCase__ ( _lowercase ): '''simple docstring''' try: if args.tpu_name: UpperCAmelCase_ : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: UpperCAmelCase_ : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ''' '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' ) tf.config.experimental_connect_to_cluster(_lowercase ) tf.tpu.experimental.initialize_tpu_system(_lowercase ) return tpu def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = 0 for file in file_list: UpperCAmelCase_ : Optional[int] = file.split('''/''' )[-1] UpperCAmelCase_ : Tuple = re.search(r'''-\d+-(\d+)\.tfrecord''' , _lowercase ).group(1 ) UpperCAmelCase_ : Dict = int(_lowercase ) num_samples += sample_count return num_samples def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ): '''simple docstring''' UpperCAmelCase_ : Tuple = count_samples(_lowercase ) UpperCAmelCase_ : List[Any] = tf.data.Dataset.from_tensor_slices(_lowercase ) if shuffle: UpperCAmelCase_ : List[Any] = dataset.shuffle(len(_lowercase ) ) UpperCAmelCase_ : Any = tf.data.TFRecordDataset(_lowercase , num_parallel_reads=_lowercase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here UpperCAmelCase_ : Any = dataset.apply(tf.data.experimental.assert_cardinality(_lowercase ) ) UpperCAmelCase_ : List[Any] = dataset.map(_lowercase , num_parallel_calls=_lowercase ) if shuffle: assert shuffle_buffer_size is not None UpperCAmelCase_ : Tuple = dataset.shuffle(args.shuffle_buffer_size ) UpperCAmelCase_ : Optional[Any] = dataset.batch(_lowercase , drop_remainder=_lowercase ) UpperCAmelCase_ : Optional[Any] = dataset.map(_lowercase , num_parallel_calls=_lowercase ) UpperCAmelCase_ : Tuple = dataset.prefetch(_lowercase ) return dataset def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not args.no_tpu: UpperCAmelCase_ : List[Any] = initialize_tpu(_lowercase ) UpperCAmelCase_ : Optional[int] = tf.distribute.TPUStrategy(_lowercase ) else: UpperCAmelCase_ : Dict = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(args.tokenizer ) UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(args.pretrained_model_config ) UpperCAmelCase_ : int = tokenizer.vocab_size UpperCAmelCase_ : Union[str, Any] = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) ) if not training_records: raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' ) UpperCAmelCase_ : Tuple = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) ) if not eval_records: raise ValueError(f'''No .tfrecord 
files found in {args.eval_dataset}.''' ) UpperCAmelCase_ : Dict = count_samples(_lowercase ) UpperCAmelCase_ : Any = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) UpperCAmelCase_ : Optional[int] = steps_per_epoch * args.num_epochs with strategy.scope(): UpperCAmelCase_ : Any = TFAutoModelForMaskedLM.from_config(_lowercase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built UpperCAmelCase_, UpperCAmelCase_ : Any = create_optimizer( num_train_steps=_lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_lowercase , metrics=['''accuracy'''] ) def decode_fn(_lowercase ): UpperCAmelCase_ : List[Any] = { '''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_lowercase , _lowercase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. UpperCAmelCase_ : str = DataCollatorForLanguageModeling( tokenizer=_lowercase , mlm_probability=args.mlm_probability , mlm=_lowercase , return_tensors='''tf''' ) def mask_with_collator(_lowercase ): # TF really needs an isin() function UpperCAmelCase_ : Dict = ( ~tf.cast(batch['''attention_mask'''] , tf.bool ) | (batch['''input_ids'''] == tokenizer.cls_token_id) | (batch['''input_ids'''] == tokenizer.sep_token_id) ) UpperCAmelCase_, UpperCAmelCase_ : List[Any] = data_collator.tf_mask_tokens( batch['''input_ids'''] , vocab_size=len(_lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_lowercase , ) return batch UpperCAmelCase_ : Any = args.per_replica_batch_size * strategy.num_replicas_in_sync UpperCAmelCase_ : Any = prepare_dataset( _lowercase , decode_fn=_lowercase , mask_fn=_lowercase , batch_size=_lowercase , shuffle=_lowercase , shuffle_buffer_size=args.shuffle_buffer_size , ) UpperCAmelCase_ : Dict = prepare_dataset( _lowercase , decode_fn=_lowercase , mask_fn=_lowercase , batch_size=_lowercase , shuffle=_lowercase , ) UpperCAmelCase_ : Optional[Any] = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_lowercase ) ) model.fit( _lowercase , validation_data=_lowercase , epochs=args.num_epochs , callbacks=_lowercase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __a = parse_args() main(args)
30
from functools import reduce

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
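# Hedged sanity check (added for illustration): the reduce folds a digit
# window into the product of its digits, carried along as a string.
assert int(reduce(lambda x, y: str(int(x) * int(y)), N[:4])) == 7 * 3 * 1 * 6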
30
1
from __future__ import annotations


def all_unique(collection: list) -> bool:
    """Return True if every element of the collection occurs exactly once."""
    return len(set(collection)) == len(collection)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
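# Hedged usage sketch (added for illustration); `all_unique` is the name
# chosen in the cleanup above.
assert all_unique([1, 2, 3])
assert not all_unique(["a", "b", "a"])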
30
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute the first `precision` digits of pi with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    # Each iteration of the Chudnovsky series yields roughly 14 digits.
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
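# Hedged sanity check (added for illustration): the leading digits must match
# the well-known expansion of pi.
assert pi(10).startswith("3.14159")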
30
1
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulate a quantum full adder and return the measurement counts."""
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # Build registers.
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # List the entries.
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # Build the circuit.
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
30
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
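# Hedged usage sketch (added for illustration, kept as comments because this
# module only resolves inside the transformers package layout):
#
#     from transformers import NezhaConfig
#     cfg = NezhaConfig()
#     assert cfg.model_type == "nezha" and cfg.vocab_size == 21128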
30
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
30
1
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
30
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular multiplicative inverse of a modulo m via the extended
    Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
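# Hedged sanity check (added for illustration): a modular inverse satisfies
# (a * a^-1) % m == 1; 7 and 26 are coprime with inverse 15.
assert (7 * find_mod_inverse(7, 26)) % 26 == 1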
30
1
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxRuntimeRequiredTests:
    # Empty scaffold: `require_onnxruntime` skips the class when onnxruntime is unavailable.
    pass
30
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
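# Hedged usage sketch (added for illustration, kept as comments because
# instantiating the tool downloads the CIDAS/clipseg-rd64-refined weights;
# the image path below is a hypothetical placeholder):
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     mask = tool(image=Image.open("cats.png"), label="cat")
#     mask.save("cat_mask.png")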
30
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __a = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __a = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 
'start_index', 'end_index', 'text'] ) __a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_a ) class __a: """simple docstring""" def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( _SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) elif titles is None or texts is None: UpperCAmelCase_ : List[str] = titles if texts is None else texts return super().__call__( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles] UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts] UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' ) UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : Optional[int] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: UpperCAmelCase_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = reader_input['''input_ids'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = [] for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) UpperCAmelCase_ : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_a ) class __a( _a , _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = ['''input_ids''', '''attention_mask''']
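# Illustrative usage sketch (ours, not part of the original file), assuming the
# public `transformers` DPR reader API that the classes above implement:
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# decode_best_spans combines start/end logits with the per-passage relevance score
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)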
import numpy as np import datasets __a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' __a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' __a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ), } ) ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # convert to numpy arrays UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get the Mahalanobis distance for each prediction UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = np.cov(reference_distribution.T ) try: UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE ) except np.linalg.LinAlgError: UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
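# A minimal numpy sketch (ours) of the same computation the metric performs:
# d_i = (x_i - mu) @ Sigma^+ @ (x_i - mu)^T for each row x_i of X, where Sigma^+
# is the (pseudo-)inverse covariance of the reference distribution.
import numpy as np

X = np.array([[0.0, 1.0]])
reference_distribution = np.array([[0.0, 1.0], [1.0, 0.0]])

X_minus_mu = X - np.mean(reference_distribution)
cov = np.cov(reference_distribution.T)
inv_covmat = np.linalg.pinv(cov)  # the covariance here is singular, hence the pseudo-inverse
mahal_dist = np.dot(np.dot(X_minus_mu, inv_covmat), X_minus_mu.T).diagonal()
print(mahal_dist)  # [0.5], matching the docstring example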
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a parsed `Version`) against a requirement using a string operator such as ">="."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
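# Hedged usage sketch (ours): with an accelerate-style STR_OPERATION_TO_FUNC that
# maps strings such as ">=" to `operator.ge`, the helpers above are used like
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # rely on a feature introduced in torch 1.12
#
# The underlying comparison can be reproduced with `packaging` alone:
from packaging.version import parse

assert parse("1.13.1") >= parse("1.12.0")
assert not parse("1.11.0") >= parse("1.12.0")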
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps` steps, taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
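# Quick illustrative check (ours): the recurrence above is a shifted Fibonacci
# sequence, e.g. there are 8 ways to climb 5 steps taking 1 or 2 at a time.
assert climb_stairs(1) == 1
assert climb_stairs(2) == 2
assert climb_stairs(5) == 8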
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __a( _a ): """simple docstring""" lowerCAmelCase = '''wav2vec2''' def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : List[Any] = feat_extract_activation UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = conv_bias UpperCAmelCase_ : str = num_conv_pos_embeddings UpperCAmelCase_ : Any = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = hidden_dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : List[str] = feat_proj_dropout UpperCAmelCase_ : int = final_dropout UpperCAmelCase_ : Union[str, Any] = layerdrop UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Optional[int] = do_stable_layer_norm UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : Optional[Any] = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : str = mask_feature_length UpperCAmelCase_ : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group UpperCAmelCase_ : Any = num_codevector_groups UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature UpperCAmelCase_ : List[str] = feat_quantizer_dropout UpperCAmelCase_ : Dict = num_negatives UpperCAmelCase_ : List[str] = codevector_dim UpperCAmelCase_ : List[str] = proj_codevector_dim UpperCAmelCase_ : str = diversity_loss_weight # ctc loss UpperCAmelCase_ : List[Any] = ctc_loss_reduction UpperCAmelCase_ : List[str] = ctc_zero_infinity # adapter UpperCAmelCase_ : Optional[Any] = add_adapter UpperCAmelCase_ : Any = adapter_kernel_size UpperCAmelCase_ : Optional[int] = adapter_stride UpperCAmelCase_ : List[Any] = num_adapter_layers UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size UpperCAmelCase_ : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = xvector_output_dim @property def a__ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
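# Illustrative sketch (ours): the final property of the config above multiplies the
# convolutional strides together, i.e. the waveform-to-frame downsampling ratio of
# the feature encoder. With the default strides it is 5 * 2**6 = 320, one hidden
# frame per 320 input samples (20 ms of audio at 16 kHz).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320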
import argparse import torch from transformers import GPT2LMHeadModel, RobertaForMaskedLM if __name__ == "__main__": __a = argparse.ArgumentParser( description=( 'Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2']) parser.add_argument('--model_name', default='roberta-large', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') __a = parser.parse_args() if args.model_type == "roberta": __a = RobertaForMaskedLM.from_pretrained(args.model_name) __a = 'roberta' elif args.model_type == "gpt2": __a = GPT2LMHeadModel.from_pretrained(args.model_name) __a = 'transformer' __a = model.state_dict() __a = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: __a = state_dict[F"""{prefix}.{param_name}"""] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: __a = F"""{prefix}.embeddings.{w}.weight""" __a = state_dict[param_name] for w in ["weight", "bias"]: __a = F"""{prefix}.embeddings.LayerNorm.{w}""" __a = state_dict[param_name] # Transformer Blocks # __a = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: __a = state_dict[ F"""{prefix}.h.{teacher_idx}.{layer}.{w}""" ] __a = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: __a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}""" ] std_idx += 1 # Language Modeling Head # if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: __a = state_dict[F"""{layer}"""] if args.vocab_transform: for w in ["weight", "bias"]: __a = state_dict[F"""lm_head.dense.{w}"""] __a = state_dict[F"""lm_head.layer_norm.{w}"""] elif args.model_type == "gpt2": for w in ["weight", "bias"]: __a = state_dict[F"""{prefix}.ln_f.{w}"""] __a = state_dict['lm_head.weight'] print(F"""N layers selected for distillation: {std_idx}""") print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
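# Illustrative follow-up (ours; paths and the 6-layer student config are
# hypothetical): the dumped checkpoint is meant to initialize a smaller student
# before distillation, e.g.
#
#     from transformers import RobertaConfig, RobertaForMaskedLM
#     student_config = RobertaConfig.from_pretrained("roberta-large", num_hidden_layers=6)
#     student = RobertaForMaskedLM(student_config)
#     student.load_state_dict(torch.load("serialization_dir/tf_roberta_048131723.pth"), strict=False)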
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __a = get_tests_dir('fixtures/dummy-config.json') class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> int: UpperCAmelCase_ : List[Any] = 0 def a__ ( self ) -> Optional[int]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : Any = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Dict = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
UpperCAmelCase_ : str = os.path.join(_SCREAMING_SNAKE_CASE ,'''fake-roberta''' ) os.makedirs(_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE ,'''config.json''' ) ,'''w''' ) as f: f.write(json.dumps({} ) ) UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) # Wrong model type will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoConfig.register('''model''' ,_SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoConfig.register('''bert''' ,_SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase_ : List[Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def a__ ( self ) -> Optional[int]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase_ : Dict = AutoConfig.from_pretrained('''bert-base''' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' ) def a__ ( self ) -> str: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' ,): UpperCAmelCase_ : Dict = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def a__ ( self ) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ ,'''NewModelConfig''' ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_config.__class__.__name__ ,'''NewModelConfig''' ) def a__ ( self ) -> str: class __a( _a ): """simple docstring""" lowerCAmelCase = '''new-model''' try: AutoConfig.register('''new-model''' ,_SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ ,'''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. UpperCAmelCase_ : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ ,'''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ ,'''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
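# Illustrative sketch (ours, standalone) of the EOF-based truncation used above:
# a generation is cut at the first stop string, dropping the trailing, usually
# incomplete, block. The stop strings and helper are re-declared here for clarity.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # keep everything before the last delimiter and the fragment that follows it
    return "".join(string_list[:-2])


print(repr(remove_last_block("    return a + b\ndef unfinished(x):")))  # '    return a + b'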
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Recursively try to color vertex `index` and all following vertices."""
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    """Color `graph` (an adjacency matrix) with at most `max_colors` colors, or return []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
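# Illustrative usage (ours): a triangle (K3) needs three colors; `color` returns one
# valid assignment, or an empty list when max_colors is too small.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(triangle, 3) == [0, 1, 2]
assert color(triangle, 2) == []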
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
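# Illustrative sketch (ours), assuming the standard `transformers` ImageGPTConfig:
# ImageGPT models a 32x32 image as a sequence of pixel tokens, so the default
# context is 32 * 32 = 1024 positions and the vocabulary holds 512 colour-cluster
# tokens plus one start-of-sequence token (512 + 1), matching the defaults above.
from transformers import ImageGPTConfig

config = ImageGPTConfig()
assert config.n_positions == 32 * 32
assert config.vocab_size == 512 + 1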
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __a = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __a = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if bloom_config_file == "": UpperCAmelCase_ : Tuple = BloomConfig() else: UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase ) if shard_model: UpperCAmelCase_ : Any = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}} UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = BloomConfig() for j, file in enumerate(_lowercase ): print('''Processing file: {}'''.format(_lowercase ) ) UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : Dict = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Average: divide the summed weights by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in
WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp torch.save( _lowercase , os.path.join( _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCAmelCase_ : Union[str, Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) UpperCAmelCase_ : List[Any] = BloomConfig() UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME UpperCAmelCase_ : List[str] = total_size with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) else: UpperCAmelCase_ : Any = BloomModel(_lowercase ) UpperCAmelCase_ : Tuple = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = None for i, file in enumerate(_lowercase ): UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : str = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Dict = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : int = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Average: divide the summed weights by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys ) else: UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(_lowercase , exist_ok=_lowercase ) UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __a = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
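# A minimal, self-contained sketch of the dtype byte-size helper the script
# above uses when building its weight-map metadata: parse the trailing
# bit-width out of a dtype's string form ("torch.float16" -> 16) and divide
# by 8. The name `dtype_byte_size` and the string-based interface are
# assumptions for this demo; the script itself operates on `torch.dtype`
# objects.
import re


def dtype_byte_size(dtype_str: str) -> float:
    if dtype_str.endswith("bool"):
        return 1 / 8  # this accounting treats booleans as single bits
    bit_search = re.search(r"[^\d](\d+)$", dtype_str)
    if bit_search is None:
        raise ValueError(f"`{dtype_str}` is not a valid dtype string.")
    return int(bit_search.groups()[0]) // 8


assert dtype_byte_size("torch.float16") == 2
assert dtype_byte_size("torch.int64") == 8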
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
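# Illustrative cross-check for solution() above: three-argument pow keeps only
# the last ten digits at every step, so the ~3000-digit sum is never built.
# This check is an addition for clarity, not part of the original solution.
MOD = 10**10
modular_result = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
assert str(modular_result).zfill(10) == solution()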
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
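# Quick numerical demo of the determinant identity the first test relies on:
# det([[A, B], [B.T, C]]) == det(A) * det(schur_complement(A, B, C)).
# Assumes numpy and schur_complement from the file above are in scope; the
# matrices are arbitrary illustrative values.
demo_a = np.array([[2.0, 1.0], [1.0, 3.0]])
demo_b = np.array([[1.0], [0.0]])
demo_c = np.array([[4.0]])
demo_s = schur_complement(demo_a, demo_b, demo_c)
demo_m = np.block([[demo_a, demo_b], [demo_b.T, demo_c]])
assert np.isclose(np.linalg.det(demo_m), np.linalg.det(demo_a) * np.linalg.det(demo_s))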
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __a = None __a = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __a = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class __a: """simple docstring""" lowerCAmelCase = True lowerCAmelCase = None # Automatically constructed lowerCAmelCase = "PIL.Image.Image" lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a ) def __call__( self ) -> Tuple: return self.pa_type def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": value, "bytes": None} elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": None, "bytes": value} elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_SCREAMING_SNAKE_CASE ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: UpperCAmelCase_ : Dict = {} UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Dict = path.split('''::''' )[-1] try: UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id'''] UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE ) except ValueError: UpperCAmelCase_ : Optional[Any] = None with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f: UpperCAmelCase_ : List[str] = BytesIO(f.read() ) UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: if pa.types.is_string(storage.type ): UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: UpperCAmelCase_ : Dict = storage.field('''bytes''' ) else: UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: UpperCAmelCase_ : int = storage.field('''path''' ) else: UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_list(storage.type ): UpperCAmelCase_ : Optional[Any] = pa.array( [encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,) UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays( [bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_SCREAMING_SNAKE_CASE ): with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f: UpperCAmelCase_ : Any = f.read() return bytes_ UpperCAmelCase_ : Union[str, Any] = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) UpperCAmelCase_ : List[str] = 
pa.array( [os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,) UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def lowerCamelCase__ ( ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = BytesIO() if image.format in list_image_compression_formats(): UpperCAmelCase_ : int = image.format else: UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(_lowercase , format=_lowercase ) return buffer.getvalue() def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if hasattr(_lowercase , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) UpperCAmelCase_ : Tuple = array.dtype UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER UpperCAmelCase_ : Dict = dtype.kind UpperCAmelCase_ : Union[str, Any] = dtype.itemsize UpperCAmelCase_ : Optional[Any] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCAmelCase_ : Tuple = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCAmelCase_ : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase ) UpperCAmelCase_ : str = np.dtype(_lowercase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) ) return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase ) if isinstance(_lowercase , _lowercase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_lowercase , np.ndarray ): UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] elif isinstance(_lowercase , PIL.Image.Image ): UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] else: return objs else: return objs
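# A stand-alone sketch of the "downcast within kind" loop that encode_np_array
# above uses: halve the dtype itemsize until a whitelisted dtype is found.
# The whitelist and the function name here are assumptions for the demo, not
# the library's real tables.
import numpy as np

_DEMO_VALID_DTYPES = {np.dtype("<u1"), np.dtype("<i2"), np.dtype("<i4")}


def downcast_within_kind(dtype: np.dtype) -> np.dtype:
    kind = dtype.kind
    itemsize = dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype("<" + kind + str(itemsize))
        if candidate in _DEMO_VALID_DTYPES:
            return candidate
        itemsize //= 2
    raise TypeError(f"Cannot convert dtype {dtype} to a valid image dtype.")


assert downcast_within_kind(np.dtype("int64")) == np.dtype("<i4")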
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """A tool wrapping a seq2seq checkpoint to summarize English text."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> List[str]: UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(_SCREAMING_SNAKE_CASE ) from datasets import load_dataset UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' ) UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' ) UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = outputs.logits UpperCAmelCase_ : Tuple = torch.Size((1, 16) ) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,) self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit
    from the absolute value of the given integer."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
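# Example for remove_digit above: deleting one digit of 132 yields 32, 12 or
# 13, so the maximum is 32; the sign is ignored via abs(). Assumes the
# function defined above is in scope.
assert remove_digit(132) == 32
assert remove_digit(-987) == 98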
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
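# The file above is a rename-with-deprecation shim: the old class subclasses
# its replacement and emits a FutureWarning on construction. A generic sketch
# of the same pattern with hypothetical class names:
import warnings


class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)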
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
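# The _LazyModule registration above defers the torch-backed imports until an
# attribute is first touched. A minimal stand-alone sketch of the same idea
# with module-level __getattr__ (PEP 562); the attribute-to-module mapping
# below is hypothetical.
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dataclass": "dataclasses"}


def __getattr__(name):
    # Invoked only when `name` is not found through normal lookup; import the
    # backing module on first use and hand back the requested attribute.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")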
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]: UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30} UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Any = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : List[str] = crop_pct UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Any = do_normalize UpperCAmelCase_ : str = image_mean UpperCAmelCase_ : List[Any] = image_std def a__ ( self ) -> str: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def a__ ( self ) -> Dict: UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self ) @property def a__ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Dict: # Initialize image_processing UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> List[Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place with odd-even transposition (brick) sort and
    return it."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
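# Worked pass for odd_even_transposition above on [3, 1, 2]: the even pass
# compares indices (0, 1) giving [1, 3, 2], the odd pass compares (1, 2)
# giving [1, 2, 3]. Assumes the function defined above is in scope.
assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]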
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple:
    """Solve the system a1*x + b1*y = c1 and a2*x + b2*y = c2 with Cramer's
    rule. Each equation is given as [a, b, c]."""
    # Check that both equations are valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (Consistent system)
            return (x, y)
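# Example for cramers_rule_2x2 above: x + 2y = 3 and 4x + 5y = 6 have the
# unique solution (x, y) = (-1.0, 2.0). Assumes the function defined above is
# in scope.
assert cramers_rule_2x2([1, 2, 3], [4, 5, 6]) == (-1.0, 2.0)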
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC 4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC 4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters, then convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
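# Round-trip check of the implementation above against the standard library;
# purely illustrative. Assumes base64_encode and base64_decode from the file
# above are in scope.
import base64 as stdlib_base64

payload = b"Algorithms"
assert base64_encode(payload) == stdlib_base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload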
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __a = logging.getLogger(__name__) def lowerCamelCase__ ( _lowercase=2 , _lowercase=3 , _lowercase=16 , _lowercase = 10 , _lowercase = 2 ): '''simple docstring''' def get_dataset(_lowercase ): UpperCAmelCase_ : Optional[int] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase_ : Tuple = get_dataset(_lowercase ) UpperCAmelCase_ : List[Any] = get_dataset(_lowercase ) UpperCAmelCase_ : Optional[int] = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) UpperCAmelCase_ : int = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ): '''simple docstring''' UpperCAmelCase_ : List[Any] = [] for epoch in range(_lowercase ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase_, UpperCAmelCase_ : int = batch UpperCAmelCase_ : List[Any] = model(_lowercase ) UpperCAmelCase_ : Dict = torch.nn.functional.mse_loss(_lowercase , _lowercase ) accelerator.backward(_lowercase ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __a( nn.Module ): """simple docstring""" def __init__( self ) -> Dict: super().__init__() UpperCAmelCase_ : List[str] = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase_ : int = nn.Parameter(torch.randn(1 ) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: return x * self.a + self.b class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase_, UpperCAmelCase_ : str = dummy_dataloaders() UpperCAmelCase_ : Any = ProjectConfiguration(total_limit=1 ,project_dir=_SCREAMING_SNAKE_CASE ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ) # Train baseline UpperCAmelCase_ : str = Accelerator(project_config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 ) def a__ ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = dummy_dataloaders() # Train baseline UpperCAmelCase_ : Union[str, Any] = Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save initial UpperCAmelCase_ : Union[str, Any] = 
os.path.join(_SCREAMING_SNAKE_CASE ,'''initial''' ) accelerator.save_state(_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() UpperCAmelCase_ : Optional[Any] = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : int = model.a.item(), model.b.item() UpperCAmelCase_ : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase_ : Tuple = DummyModel() UpperCAmelCase_ : Dict = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase_, UpperCAmelCase_ : List[Any] = dummy_dataloaders() UpperCAmelCase_ : List[str] = Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) accelerator.load_state(_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save everything UpperCAmelCase_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoint''' ) accelerator.save_state(_SCREAMING_SNAKE_CASE ) # Load everything back in and make sure all states work accelerator.load_state(_SCREAMING_SNAKE_CASE ) test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item() UpperCAmelCase_ : Any = optimizer.state_dict() self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase_ : str = DummyModel() UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase_, UpperCAmelCase_ : int = dummy_dataloaders() UpperCAmelCase_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ) # Train baseline UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() ((UpperCAmelCase_), (UpperCAmelCase_)) : int = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() UpperCAmelCase_ : int = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : List[str] = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase_ : Optional[int] = DummyModel() UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) 
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = dummy_dataloaders() UpperCAmelCase_ : List[str] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Dict = model.a.item(), model.b.item() UpperCAmelCase_ : List[Any] = optimizer.state_dict() self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_1''' ) ) test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase_), (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item() UpperCAmelCase_ : str = optimizer.state_dict() self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : str = torch.tensor([1, 2, 3] ) UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] ) UpperCAmelCase_ : int = DummyModel() UpperCAmelCase_ : Tuple = torch.optim.Adam(net.parameters() ) UpperCAmelCase_ : Optional[int] = Accelerator() with self.assertRaises(_SCREAMING_SNAKE_CASE ) as ve: accelerator.register_for_checkpointing(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def a__ ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase_ : Any = DummyModel() UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase_ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(_SCREAMING_SNAKE_CASE ,step_size=1 ,gamma=0.99 ) UpperCAmelCase_, UpperCAmelCase_ : Tuple = dummy_dataloaders() UpperCAmelCase_ : str = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ) # Train baseline UpperCAmelCase_ : List[str] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[Any] = accelerator.prepare( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() UpperCAmelCase_ : int = scheduler.state_dict() train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) 
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) ) self.assertEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() ) def a__ ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase_ : List[str] = DummyModel() UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ,total_limit=2 ) # Train baseline UpperCAmelCase_ : Tuple = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,'''checkpoints''' ,'''checkpoint_10''' ) ) ) @require_cuda def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_SCREAMING_SNAKE_CASE ,env=os.environ.copy() ) if __name__ == "__main__": __a = '/tmp/accelerate/state_checkpointing' __a = DummyModel() __a = torch.optim.Adam(params=model.parameters(), lr=1E-3) __a = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) __a ,__a = dummy_dataloaders() __a = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __a = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) __a ,__a ,__a ,__a ,__a = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __a ,__a = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: __a = group['params'][0].device break assert param_device.type == accelerator.device.type __a = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: __a = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: __a = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 0 @slow def a__ ( self ) -> Any: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,20 ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Check that tokenizer_type ≠ model_type UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) 
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: with pytest.raises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' ) @require_tokenizers def a__ ( self ) -> Optional[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) else: self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) @require_tokenizers def a__ ( self ) -> List[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,): UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def a__ ( self ) -> Optional[Any]: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai UpperCAmelCase_ : int = TOKENIZER_MAPPING.values() UpperCAmelCase_ : List[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Tuple: self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = '''Hello, world. 
How are you?''' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) @require_tokenizers def a__ ( self ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) self.assertEqual(tokenizer.vocab_size ,30_000 ) self.assertEqual(tokenizer.unk_token ,'''[UNK]''' ) self.assertEqual(tokenizer.padding_side ,'''right''' ) self.assertEqual(tokenizer.truncation_side ,'''right''' ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size ,12 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: # Check we can load the tokenizer config of an online model. UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' ) def a__ ( self ) -> str: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def a__ ( self ) -> int: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) # Can register in two steps AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> Optional[int]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) @require_tokenizers def a__ ( self ) -> Optional[int]: class __a( _a ): """simple docstring""" lowerCAmelCase = False class __a( _a ): """simple docstring""" lowerCAmelCase = NewTokenizer lowerCAmelCase = False try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> int: UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) def a__ ( self ) -> Optional[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' ) def a__ ( self ) -> Any: # Make sure we have cached the tokenizer. UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count ,0 ) self.assertEqual(counter.head_request_count ,1 ) self.assertEqual(counter.other_request_count ,0 )
30
1
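A minimal sketch of the register-and-reload pattern the tests above exercise, assuming a hypothetical `MyConfig`/`MyTokenizer` pair standing in for the test fixtures; `AutoConfig.register` and `AutoTokenizer.register` are the public transformers entry points under test.

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):        # hypothetical stand-in for the CustomConfig fixture
    model_type = "my-model"

class MyTokenizer(PreTrainedTokenizer):  # hypothetical stand-in for the CustomTokenizer fixture
    pass

# Tie the new config type to its tokenizer so AutoTokenizer can resolve it.
AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)

After registering, a tokenizer saved with `save_pretrained` and reloaded through `AutoTokenizer.from_pretrained` resolves back to `MyTokenizer`, which is exactly the round trip the assertions above check before cleaning up the registries.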
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint __a = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } __a = { '169M': 768, '430M': 1_024, '1B5': 2_048, '3B': 2_560, '7B': 4_096, '14B': 5_120, } def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: UpperCAmelCase_ : Union[str, Any] = state_dict.pop(_lowercase ) # emb -> embeddings if name.startswith('''emb.''' ): UpperCAmelCase_ : Any = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): UpperCAmelCase_ : Optional[int] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention UpperCAmelCase_ : str = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , _lowercase ) # ffn -> feed_forward UpperCAmelCase_ : Tuple = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , _lowercase ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): UpperCAmelCase_ : Union[str, Any] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): UpperCAmelCase_ : Tuple = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_receptance and reshape if name.endswith('''.time_mix_r''' ): UpperCAmelCase_ : Optional[Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": UpperCAmelCase_ : Tuple = '''rwkv.''' + name UpperCAmelCase_ : Dict = weight return state_dict def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) UpperCAmelCase_ : Tuple = 50277 UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: UpperCAmelCase_ : Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=_lowercase ) UpperCAmelCase_ : List[Any] = len(_lowercase ) tokenizer.save_pretrained(_lowercase ) # 2. Build the config UpperCAmelCase_ : List[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: UpperCAmelCase_ : List[Any] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' ) UpperCAmelCase_ : Optional[int] = RwkvConfig( vocab_size=_lowercase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(_lowercase ) # 3. Download model file then convert state_dict UpperCAmelCase_ : Dict = hf_hub_download(_lowercase , _lowercase ) UpperCAmelCase_ : Optional[Any] = torch.load(_lowercase , map_location='''cpu''' ) UpperCAmelCase_ : Optional[int] = convert_state_dict(_lowercase ) # 4. 
Split in shards and save UpperCAmelCase_, UpperCAmelCase_ : int = shard_checkpoint(_lowercase ) for shard_file, shard in shards.items(): torch.save(_lowercase , os.path.join(_lowercase , _lowercase ) ) if index is not None: UpperCAmelCase_ : Any = os.path.join(_lowercase , _lowercase ) # Save the index as well with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : List[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict) print( '''Cleaning up shards. This may error out with an OOM error; if this is the case, don\'t worry, you still have converted the model.''' ) UpperCAmelCase_ : Tuple = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: UpperCAmelCase_ : Optional[Any] = torch.load(os.path.join(_lowercase , _lowercase ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowercase , _lowercase ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_lowercase ) model.push_to_hub(_lowercase , max_shard_size='''2GB''' ) tokenizer.push_to_hub(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push the converted model to the Hub.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) __a = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
30
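The renaming loop in the conversion script above follows a common checkpoint-porting pattern: pop each key, rewrite it through a rule table, and reinsert the tensor under the new name. A distilled sketch (the rule list here is illustrative, not the script's full set):

import re

RENAME_RULES = [
    (r"^emb\.", "embeddings."),                          # emb -> embeddings
    (r"blocks\.(\d+)\.att", r"blocks.\1.attention"),     # att -> attention
    (r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward"),  # ffn -> feed_forward
]

def rename_keys(state_dict: dict) -> dict:
    for name in list(state_dict):
        weight = state_dict.pop(name)
        for pattern, repl in RENAME_RULES:
            name = re.sub(pattern, repl, name)
        state_dict[name] = weight
    return state_dict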
from functools import reduce __a = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCamelCase__ ( _lowercase = N ): '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) ) for i in range(len(_lowercase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
30
1
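The greatest-product search above can be written more directly with `math.prod` over a sliding window; this is an equivalent formulation, not the solution file's own code:

from math import prod

def greatest_product(digits: str, window: int = 13) -> int:
    # Maximum product over every run of `window` consecutive digits.
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )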
__a = [0, 2, 4, 6, 8] __a = [1, 3, 5, 7, 9] def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 UpperCAmelCase_ : Any = 0 for digit in range(10 ): UpperCAmelCase_ : int = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowercase , _lowercase ) return result UpperCAmelCase_ : Optional[int] = 0 for digita in range(10 ): UpperCAmelCase_ : Tuple = digita if (remainder + digita) % 2 == 0: UpperCAmelCase_ : Union[str, Any] = ODD_DIGITS else: UpperCAmelCase_ : int = EVEN_DIGITS for digita in other_parity_digits: UpperCAmelCase_ : Any = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowercase , _lowercase , ) return result def lowerCamelCase__ ( _lowercase = 9 ): '''simple docstring''' UpperCAmelCase_ : List[Any] = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(_lowercase , 0 , [0] * length , _lowercase ) return result if __name__ == "__main__": print(F"""{solution() = }""")
30
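The combinatorial count above can be sanity-checked by brute force for small limits. Project Euler 145 states there are 120 reversible numbers below one thousand, which this sketch reproduces:

def is_reversible(n: int) -> bool:
    if n % 10 == 0:                      # reversed value would carry a leading zero
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 for d in str(total))

assert sum(is_reversible(n) for n in range(1, 1_000)) == 120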
from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ): raise TypeError('''Undefined for non-integers''' ) elif precision < 1: raise ValueError('''Undefined for non-natural numbers''' ) UpperCAmelCase_ : Tuple = precision UpperCAmelCase_ : Optional[Any] = ceil(precision / 14 ) UpperCAmelCase_ : int = 426880 * Decimal(10005 ).sqrt() UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : List[Any] = 13591409 UpperCAmelCase_ : Optional[Any] = Decimal(_lowercase ) for k in range(1 , _lowercase ): UpperCAmelCase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": __a = 50 print(F"""The first {n} digits of pi is: {pi(n)}""")
30
1
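The magic constants in the pi routine above come straight from the Chudnovsky series, which the loop accumulates term by term:

\[
\frac{1}{\pi} = 12 \sum_{k=0}^{\infty} \frac{(-1)^k\,(6k)!\,(13591409 + 545140134\,k)}{(3k)!\,(k!)^3\,640320^{3k+3/2}}
\]

The code's `426880 * Decimal(10005).sqrt()` equals \(640320^{3/2}/12\) (since \(640320 = 64 \cdot 10005\)), and each iteration multiplies the exponential term by \(-640320^3 = -262537412640768000\).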
import colorsys from PIL import Image # type: ignore def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = x UpperCAmelCase_ : List[str] = y for step in range(_lowercase ): # noqa: B007 UpperCAmelCase_ : Union[str, Any] = a * a - b * b + x UpperCAmelCase_ : List[Any] = 2 * a * b + y UpperCAmelCase_ : List[Any] = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) ) def lowerCamelCase__ ( _lowercase = 800 , _lowercase = 600 , _lowercase = -0.6 , _lowercase = 0 , _lowercase = 3.2 , _lowercase = 50 , _lowercase = True , ): '''simple docstring''' UpperCAmelCase_ : int = Image.new('''RGB''' , (image_width, image_height) ) UpperCAmelCase_ : List[str] = img.load() # loop through the image-coordinates for image_x in range(_lowercase ): for image_y in range(_lowercase ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase_ : str = figure_width / image_width * image_height UpperCAmelCase_ : Dict = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase_ : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase_ : int = get_distance(_lowercase , _lowercase , _lowercase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase_ : Dict = get_color_coded_rgb(_lowercase ) else: UpperCAmelCase_ : int = get_black_and_white_rgb(_lowercase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure __a = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
30
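The escape-time loop above is often written with Python's complex type; `abs(z) > 2` is the same divergence test as `a * a + b * b > 4`. A compact equivalent sketch:

def escape_ratio(c: complex, max_step: int = 50) -> float:
    z = c                                  # same starting point as the a, b = x, y version
    step = 0
    for step in range(max_step):           # noqa: B007
        z = z * z + c                      # z_{n+1} = z_n ** 2 + c
        if abs(z) > 2:                     # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)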
from __future__ import annotations import math __a = '2020.9.26' __a = 'xcodz-dot, cclaus, dhruvmanila' def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ): UpperCAmelCase_ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}''' raise TypeError(_lowercase ) UpperCAmelCase_ : Tuple = ((x * distance) / (z + distance)) * scale UpperCAmelCase_ : str = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ): raise TypeError('''Axis must be a str''' ) UpperCAmelCase_ : Optional[Any] = locals() del input_variables["axis"] if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ): UpperCAmelCase_ : List[Any] = ( '''Input values except axis must either be float or int: ''' f'''{list(input_variables.values() )}''' ) raise TypeError(_lowercase ) UpperCAmelCase_ : Dict = (angle % 360) / 450 * 180 / math.pi if axis == "z": UpperCAmelCase_ : Optional[int] = x * math.cos(_lowercase ) - y * math.sin(_lowercase ) UpperCAmelCase_ : List[Any] = y * math.cos(_lowercase ) + x * math.sin(_lowercase ) UpperCAmelCase_ : Optional[int] = z elif axis == "x": UpperCAmelCase_ : Any = y * math.cos(_lowercase ) - z * math.sin(_lowercase ) UpperCAmelCase_ : int = z * math.cos(_lowercase ) + y * math.sin(_lowercase ) UpperCAmelCase_ : Dict = x elif axis == "y": UpperCAmelCase_ : Union[str, Any] = x * math.cos(_lowercase ) - z * math.sin(_lowercase ) UpperCAmelCase_ : Optional[int] = z * math.cos(_lowercase ) + x * math.sin(_lowercase ) UpperCAmelCase_ : Optional[int] = y else: raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
30
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __a = { 'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongT5EncoderModel', 'LongT5ForConditionalGeneration', 'LongT5Model', 'LongT5PreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FlaxLongT5ForConditionalGeneration', 'FlaxLongT5Model', 'FlaxLongT5PreTrainedModel', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
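The `_LazyModule` indirection above defers heavy imports until an attribute is first touched. The same effect can be sketched with module-level `__getattr__` (PEP 562); the attribute-to-submodule mapping below is illustrative:

# hypothetical package __init__.py
import importlib

_LAZY_ATTRS = {
    "LongT5Config": ".configuration_longt5",
    "LongT5Model": ".modeling_longt5",
}

def __getattr__(name: str):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")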
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __a = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a = concatenate_datasets __a = DownloadConfig __a = DownloadManager __a = DownloadMode __a = DownloadConfig __a = DownloadMode __a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
30
1
from __future__ import annotations def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = 0.00 UpperCAmelCase_ : Tuple = 0 for resistor in resistors: if resistor <= 0: UpperCAmelCase_ : List[Any] = f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(_lowercase ) first_sum += 1 / float(_lowercase ) index += 1 return 1 / first_sum def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = 0.00 UpperCAmelCase_ : str = 0 for resistor in resistors: sum_r += resistor if resistor < 0: UpperCAmelCase_ : Tuple = f'''Resistor at index {index} has a negative value!''' raise ValueError(_lowercase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
30
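The two resistor helpers above implement the standard network formulas:

\[
R_{\text{series}} = \sum_i R_i, \qquad \frac{1}{R_{\text{parallel}}} = \sum_i \frac{1}{R_i}
\]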
def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' while a != 0: UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = b % a, a return b def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' if gcd(_lowercase , _lowercase ) != 1: UpperCAmelCase_ : int = f'''mod inverse of {a!r} and {m!r} does not exist''' raise ValueError(_lowercase ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = 1, 0, a UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Dict = 0, 1, m while va != 0: UpperCAmelCase_ : List[Any] = ua // va UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
30
1
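The extended-Euclid routine above can be cross-checked against Python's built-in three-argument `pow`, which has computed modular inverses directly since Python 3.8 and raises `ValueError` when no inverse exists:

def mod_inverse(a: int, m: int) -> int:
    # pow with exponent -1 and a modulus performs the same
    # extended-Euclidean computation internally (Python >= 3.8).
    return pow(a, -1, m)

assert mod_inverse(7, 26) == 15          # 7 * 15 == 105 == 4 * 26 + 1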
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ): '''simple docstring''' UpperCAmelCase_ : Tuple = tau * frequency / samplerate UpperCAmelCase_ : int = sin(_lowercase ) UpperCAmelCase_ : List[str] = cos(_lowercase ) UpperCAmelCase_ : Optional[Any] = _sin / (2 * q_factor) UpperCAmelCase_ : str = (1 - _cos) / 2 UpperCAmelCase_ : Dict = 1 - _cos UpperCAmelCase_ : Dict = 1 + alpha UpperCAmelCase_ : Any = -2 * _cos UpperCAmelCase_ : List[Any] = 1 - alpha UpperCAmelCase_ : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ): '''simple docstring''' UpperCAmelCase_ : Any = tau * frequency / samplerate UpperCAmelCase_ : Optional[int] = sin(_lowercase ) UpperCAmelCase_ : Optional[Any] = cos(_lowercase ) UpperCAmelCase_ : List[str] = _sin / (2 * q_factor) UpperCAmelCase_ : Tuple = (1 + _cos) / 2 UpperCAmelCase_ : Any = -1 - _cos UpperCAmelCase_ : Dict = 1 + alpha UpperCAmelCase_ : Optional[Any] = -2 * _cos UpperCAmelCase_ : Optional[int] = 1 - alpha UpperCAmelCase_ : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ): '''simple docstring''' UpperCAmelCase_ : Tuple = tau * frequency / samplerate UpperCAmelCase_ : Union[str, Any] = sin(_lowercase ) UpperCAmelCase_ : int = cos(_lowercase ) UpperCAmelCase_ : Tuple = _sin / (2 * q_factor) UpperCAmelCase_ : List[str] = _sin / 2 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : str = -ba UpperCAmelCase_ : Optional[int] = 1 + alpha UpperCAmelCase_ : Optional[int] = -2 * _cos UpperCAmelCase_ : Any = 1 - alpha UpperCAmelCase_ : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tau * frequency / samplerate UpperCAmelCase_ : List[Any] = sin(_lowercase ) UpperCAmelCase_ : Optional[int] = cos(_lowercase ) UpperCAmelCase_ : Union[str, Any] = _sin / (2 * q_factor) UpperCAmelCase_ : Any = 1 - alpha UpperCAmelCase_ : int = -2 * _cos UpperCAmelCase_ : List[str] = 1 + alpha UpperCAmelCase_ : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ): '''simple docstring''' UpperCAmelCase_ : List[str] = tau * frequency / samplerate UpperCAmelCase_ : int = sin(_lowercase ) UpperCAmelCase_ : int = cos(_lowercase ) UpperCAmelCase_ : Tuple = _sin / (2 * q_factor) UpperCAmelCase_ : Dict = 10 ** (gain_db / 40) UpperCAmelCase_ : List[str] = 1 + alpha * big_a UpperCAmelCase_ : Tuple = -2 * _cos UpperCAmelCase_ : Tuple = 1 - alpha * big_a UpperCAmelCase_ : Tuple = 1 + alpha / big_a UpperCAmelCase_ : Optional[Any] = -2 * _cos UpperCAmelCase_ : Dict = 1 - alpha / big_a UpperCAmelCase_ : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ): '''simple docstring''' UpperCAmelCase_ : List[str] = tau * frequency / samplerate UpperCAmelCase_ : Dict = sin(_lowercase ) UpperCAmelCase_ : List[str] = cos(_lowercase ) UpperCAmelCase_ : str = _sin / (2 * q_factor) UpperCAmelCase_ : str = 10 ** (gain_db / 40) UpperCAmelCase_ : Tuple = (big_a + 1) 
- (big_a - 1) * _cos UpperCAmelCase_ : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos UpperCAmelCase_ : str = (big_a - 1) - (big_a + 1) * _cos UpperCAmelCase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos UpperCAmelCase_ : int = 2 * sqrt(_lowercase ) * alpha UpperCAmelCase_ : Optional[Any] = big_a * (pmc + aaa) UpperCAmelCase_ : List[str] = 2 * big_a * mpc UpperCAmelCase_ : Tuple = big_a * (pmc - aaa) UpperCAmelCase_ : Optional[Any] = ppmc + aaa UpperCAmelCase_ : List[str] = -2 * pmpc UpperCAmelCase_ : Dict = ppmc - aaa UpperCAmelCase_ : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ): '''simple docstring''' UpperCAmelCase_ : Any = tau * frequency / samplerate UpperCAmelCase_ : Dict = sin(_lowercase ) UpperCAmelCase_ : int = cos(_lowercase ) UpperCAmelCase_ : str = _sin / (2 * q_factor) UpperCAmelCase_ : Tuple = 10 ** (gain_db / 40) UpperCAmelCase_ : int = (big_a + 1) - (big_a - 1) * _cos UpperCAmelCase_ : Tuple = (big_a + 1) + (big_a - 1) * _cos UpperCAmelCase_ : str = (big_a - 1) - (big_a + 1) * _cos UpperCAmelCase_ : Dict = (big_a - 1) + (big_a + 1) * _cos UpperCAmelCase_ : Any = 2 * sqrt(_lowercase ) * alpha UpperCAmelCase_ : Union[str, Any] = big_a * (ppmc + aaa) UpperCAmelCase_ : int = -2 * big_a * pmpc UpperCAmelCase_ : Union[str, Any] = big_a * (ppmc - aaa) UpperCAmelCase_ : Optional[Any] = pmc + aaa UpperCAmelCase_ : Union[str, Any] = 2 * mpc UpperCAmelCase_ : str = pmc - aaa UpperCAmelCase_ : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
30
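Every filter above fills in the same second-order (biquad) template, with coefficients drawn from the standard Audio EQ Cookbook formulas, where \(\omega_0\) is the code's `tau * frequency / samplerate` and \(\alpha = \sin(\omega_0)/(2Q)\):

\[
H(z) = \frac{b_0 + b_1 z^{-1} + b_2 z^{-2}}{a_0 + a_1 z^{-1} + a_2 z^{-2}}
\]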
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __a( _a ): """simple docstring""" lowerCAmelCase = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.''' ) lowerCAmelCase = '''CIDAS/clipseg-rd64-refined''' lowerCAmelCase = '''image_segmenter''' lowerCAmelCase = CLIPSegForImageSegmentation lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''image'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict: requires_backends(self ,['''vision'''] ) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str: with torch.no_grad(): UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits return logits def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : Dict = outputs.cpu().detach().numpy() UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : List[Any] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
30
1
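Outside the tool wrapper above, the same checkpoint can be driven directly through the standard CLIPSeg classes; a minimal sketch (the image path is illustrative):

from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("photo.jpg")          # illustrative input
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
logits = model(**inputs).logits          # one low-resolution mask per text prompt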
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
30
import numpy as np import datasets __a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' __a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' __a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ), } ) ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # convert to numpy arrays UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = np.cov(reference_distribution.T ) try: UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE ) except np.linalg.LinAlgError: UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
30
1
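The metric above computes the classic Mahalanobis distance, falling back to the pseudo-inverse when the covariance matrix is singular:

\[
D_M(x) = \sqrt{(x - \mu)^{\top}\,\Sigma^{-1}\,(x - \mu)}
\]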
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowerCamelCase__ ( _lowercase=None ): '''simple docstring''' if subparsers is not None: UpperCAmelCase_ : Optional[Any] = subparsers.add_parser('''env''' ) else: UpperCAmelCase_ : List[str] = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=_lowercase , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Dict = torch.__version__ UpperCAmelCase_ : Optional[Any] = torch.cuda.is_available() UpperCAmelCase_ : int = is_xpu_available() UpperCAmelCase_ : Union[str, Any] = is_npu_available() UpperCAmelCase_ : Optional[Any] = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(_lowercase ): UpperCAmelCase_ : List[Any] = load_config_from_file(args.config_file ).to_dict() UpperCAmelCase_ : Tuple = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''', '''PyTorch XPU available''': str(_lowercase ), '''PyTorch NPU available''': str(_lowercase ), '''System RAM''': f'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''', } if pt_cuda_available: UpperCAmelCase_ : int = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) UpperCAmelCase_ : Optional[int] = ( '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_lowercase , _lowercase ) else f'''\t{accelerate_config}''' ) print(_lowercase ) UpperCAmelCase_ : int = accelerate_config return info def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[str] = env_command_parser() UpperCAmelCase_ : Tuple = parser.parse_args() env_command(_lowercase ) return 0 if __name__ == "__main__": raise SystemExit(main())
30
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __a = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __a = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 
'start_index', 'end_index', 'text'] ) __a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_a ) class __a: """simple docstring""" def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( _SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) elif titles is None or texts is None: UpperCAmelCase_ : List[str] = titles if texts is None else texts return super().__call__( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles] UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts] UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' ) UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : Optional[int] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: UpperCAmelCase_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : Dict = attention_mask 
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = reader_input['''input_ids'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = [] for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) UpperCAmelCase_ : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_a ) class __a( _a , _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = ['''input_ids''', '''attention_mask''']
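# A minimal end-to-end sketch of the reader workflow the docstring above
# describes. Assumptions: the public DPRReader/DPRReaderTokenizer classes and
# the 'facebook/dpr-reader-single-nq-base' checkpoint, neither of which
# appears in this file.
from transformers import DPRReader, DPRReaderTokenizer

reader_tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
reader_model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded_inputs = reader_tokenizer(
    questions=['What is love?'],
    titles=['Haddaway'],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors='pt',
)  # input_ids has shape (n_passages, sequence_length): [CLS] question [SEP] title [SEP] text
reader_outputs = reader_model(**encoded_inputs)
best_spans = reader_tokenizer.decode_best_spans(encoded_inputs, reader_outputs)
print(best_spans[0].text)  # highest-scoring answer span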
30
1
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __a: """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=[8, 16, 32, 64] ,_SCREAMING_SNAKE_CASE=[1, 1, 2, 1] ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE="relu" ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] ,_SCREAMING_SNAKE_CASE=[2, 3, 4] ,_SCREAMING_SNAKE_CASE=1 ,) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : Dict = image_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : List[str] = embeddings_size UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : List[str] = depths UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : int = scope UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Tuple = out_indices UpperCAmelCase_ : Union[str, Any] = num_groups def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.num_labels ) UpperCAmelCase_ : List[Any] = self.get_config() return config, pixel_values, labels def a__ ( self ) -> List[Any]: return BitConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : List[Any] = BitModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : List[str] = BitForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def a__ ( self ,_SCREAMING_SNAKE_CASE 
,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : str = BitBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = BitBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __a( _a , _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def a__ ( self ) -> int: UpperCAmelCase_ : Tuple = BitModelTester(self ) UpperCAmelCase_ : Tuple = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self ) -> Any: return @unittest.skip(reason='''Bit does not output attentions''' ) def a__ ( self ) -> Union[str, Any]: pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def a__ ( self ) -> List[str]: pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> int: UpperCAmelCase_, UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[str] = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class(config=_SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(_SCREAMING_SNAKE_CASE ,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) self.assertTrue( torch.all(module.bias == 0 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) def a__ ( self ) -> Optional[int]: def check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase_ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Tuple = self.model_tester.num_stages self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[str] = layer_type UpperCAmelCase_ : Optional[Any] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[Any] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def a__ ( self ) -> List[str]: pass def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def a__ ( self ) -> Optional[int]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = BitModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __a( unittest.TestCase ): """simple docstring""" @cached_property def a__ ( self ) -> Optional[Any]: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a__ ( self ) -> List[Any]: UpperCAmelCase_ : int = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : int 
= prepare_img() UpperCAmelCase_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) @require_torch class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = (BitBackbone,) if is_torch_available() else () lowerCAmelCase = BitConfig lowerCAmelCase = False def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = BitModelTester(self )
30
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__a = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__a = {
    'configuration_xmod': [
        'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XmodConfig',
        'XmodOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XmodForCausalLM',
        'XmodForMaskedLM',
        'XmodForMultipleChoice',
        'XmodForQuestionAnswering',
        'XmodForSequenceClassification',
        'XmodForTokenClassification',
        'XmodModel',
        'XmodPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __a( _a ): """simple docstring""" lowerCAmelCase = '''wav2vec2''' def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : List[Any] = feat_extract_activation UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = conv_bias UpperCAmelCase_ : str = num_conv_pos_embeddings UpperCAmelCase_ : Any = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = hidden_dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : List[str] = feat_proj_dropout UpperCAmelCase_ : int = final_dropout UpperCAmelCase_ : Union[str, Any] = layerdrop UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Optional[int] = do_stable_layer_norm UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : Optional[Any] = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : str = mask_feature_length UpperCAmelCase_ : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group UpperCAmelCase_ : Any = num_codevector_groups UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature UpperCAmelCase_ : List[str] = feat_quantizer_dropout UpperCAmelCase_ : Dict = num_negatives UpperCAmelCase_ : List[str] = codevector_dim UpperCAmelCase_ : List[str] = proj_codevector_dim UpperCAmelCase_ : str = diversity_loss_weight # ctc loss UpperCAmelCase_ : List[Any] = ctc_loss_reduction UpperCAmelCase_ : List[str] = ctc_zero_infinity # adapter UpperCAmelCase_ : Optional[Any] = add_adapter UpperCAmelCase_ : Any = adapter_kernel_size UpperCAmelCase_ : Optional[int] = adapter_stride UpperCAmelCase_ : List[Any] = num_adapter_layers UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size UpperCAmelCase_ : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = xvector_output_dim @property def a__ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
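# The property above multiplies out `conv_stride`; the result is the feature
# extractor's total downsampling factor. A quick standalone check with the
# default strides (the released wav2vec2 checkpoints assume 16 kHz audio):
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, default_conv_stride, 1)
assert ratio == 320  # 320 raw audio samples per encoder frame
print(16_000 / ratio)  # 50.0 frames per second, i.e. one frame every 20 ms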
30
1
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id):  # name assumed; the original def name was obfuscated
    '''simple docstring'''
    if not isinstance(spanish_id, str):
        msg = f'''Expected string as input, found {type(spanish_id).__name__}'''
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
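# Worked example of the mod-23 checksum, using the names restored above:
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so '12345678Z' is valid.
assert is_spain_national_id('12345678Z')
assert is_spain_national_id('12345678-Z')  # hyphens are stripped before checking
assert not is_spain_national_id('12345678A')  # wrong check letter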
30
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


__a = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ['NllbTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ['NllbTokenizerFast']


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class __a( _a ): """simple docstring""" lowerCAmelCase = '''distilbert''' lowerCAmelCase = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self ,_SCREAMING_SNAKE_CASE=30_522 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=4 * 768 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.2 ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : List[Any] = max_position_embeddings UpperCAmelCase_ : Dict = sinusoidal_pos_embds UpperCAmelCase_ : Optional[Any] = n_layers UpperCAmelCase_ : Any = n_heads UpperCAmelCase_ : Optional[int] = dim UpperCAmelCase_ : str = hidden_dim UpperCAmelCase_ : Optional[Any] = dropout UpperCAmelCase_ : Tuple = attention_dropout UpperCAmelCase_ : Union[str, Any] = activation UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = qa_dropout UpperCAmelCase_ : Optional[int] = seq_classif_dropout super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": UpperCAmelCase_ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase_ : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
30
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
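# Standalone copy of the stop-sequence trimming helper used above (called
# `remove_last_block` at its call site; the def itself is obfuscated). The
# capturing group keeps the delimiters in the split, so dropping the last two
# pieces removes the final stop token plus everything generated after it.
import re

EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']


def remove_last_block(string):
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    return ''.join(string_list[:-2])


generation = 'def add(a, b):\n    return a + b\nprint(add(1, 2))'
print(remove_last_block(generation))  # keeps only the completed function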
30
1
def binomial_coefficient(n, r):
    '''simple docstring'''
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
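# Cross-check of the single-row Pascal method above against the standard
# library (math.comb needs Python >= 3.8).
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252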
30
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
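# The defaults above encode ImageGPT's pixel-sequence setup: a vocabulary of
# 512 color-cluster ids plus (per the released models) one start-of-sequence
# token, and one position per pixel of a 32x32 input image.
vocab_size = 512 + 1   # 513 token ids
n_positions = 32 * 32  # 1024 positions, one per pixel
print(vocab_size, n_positions)  # 513 1024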
30
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]: UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30} UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Any = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : List[str] = crop_pct UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Any = do_normalize UpperCAmelCase_ : str = image_mean UpperCAmelCase_ : List[Any] = image_std def a__ ( self ) -> str: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def a__ ( self ) -> Dict: UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self ) @property def a__ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Dict: # Initialize image_processing UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> List[Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
30
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __a = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __a = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if bloom_config_file == "": UpperCAmelCase_ : Tuple = BloomConfig() else: UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase ) if shard_model: UpperCAmelCase_ : Any = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}} UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = BloomConfig() for j, file in enumerate(_lowercase ): print('''Processing file: {}'''.format(_lowercase ) ) UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : Dict = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in 
WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp torch.save( _lowercase , os.path.join( _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCAmelCase_ : Union[str, Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) UpperCAmelCase_ : List[Any] = BloomConfig() UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME UpperCAmelCase_ : List[str] = total_size with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) else: UpperCAmelCase_ : Any = BloomModel(_lowercase ) UpperCAmelCase_ : Tuple = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = None for i, file in enumerate(_lowercase ): UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : str = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Dict = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : int = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys ) else: UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(_lowercase , exist_ok=_lowercase ) UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __a = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
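# Toy illustration of the two tensor-parallel merge rules the conversion
# above applies, with made-up shapes and a TP degree of 2: keys matching
# WEIGHTS_TO_AVERAGE_ENDSWITH are summed then divided by the TP degree, while
# parallel linear weights are concatenated (dim=1 for row-parallel keys,
# dim=0 otherwise).
import torch

shard_0 = torch.ones(2, 4)   # slice held by TP rank 0
shard_1 = torch.zeros(2, 4)  # slice held by TP rank 1

averaged = (shard_0 + shard_1) / 2                      # e.g. 'input_layernorm.weight'
column_parallel = torch.cat([shard_0, shard_1], dim=0)  # e.g. 'mlp.dense_h_to_4h.weight'
row_parallel = torch.cat([shard_0, shard_1], dim=1)     # e.g. 'self_attention.dense.weight'
assert column_parallel.shape == (4, 4) and row_parallel.shape == (2, 8)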
30
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __a( _a ): """simple docstring""" lowerCAmelCase = '''speech_to_text''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self ,_SCREAMING_SNAKE_CASE=10_000 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE="relu" ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=6_000 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=(5, 5) ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=80 ,_SCREAMING_SNAKE_CASE=1 ,**_SCREAMING_SNAKE_CASE ,) -> Union[str, Any]: UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Any = d_model UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim UpperCAmelCase_ : List[str] = encoder_layers UpperCAmelCase_ : Tuple = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : int = decoder_layers UpperCAmelCase_ : int = decoder_attention_heads UpperCAmelCase_ : Tuple = dropout UpperCAmelCase_ : Any = attention_dropout UpperCAmelCase_ : Dict = activation_dropout UpperCAmelCase_ : Optional[Any] = activation_function UpperCAmelCase_ : int = init_std UpperCAmelCase_ : Dict = encoder_layerdrop UpperCAmelCase_ : List[str] = decoder_layerdrop UpperCAmelCase_ : Optional[int] = use_cache UpperCAmelCase_ : List[str] = encoder_layers UpperCAmelCase_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase_ : str = max_source_positions UpperCAmelCase_ : Any = max_target_positions UpperCAmelCase_ : Optional[int] = num_conv_layers UpperCAmelCase_ : Optional[int] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = conv_channels UpperCAmelCase_ : List[Any] = input_feat_per_channel UpperCAmelCase_ : List[str] = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ''' f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,is_encoder_decoder=_SCREAMING_SNAKE_CASE ,decoder_start_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
30
def solution():
    '''simple docstring'''
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
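# The brute force above works because Python integers are arbitrary-precision;
# an equivalent constant-size variant keeps only the last ten digits the whole
# way through by using three-argument pow:
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))  # same ten digits as solution()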
30
1
def gcd(a, b):
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a, m):  # name assumed; the original def name was obfuscated
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (
            u1 - q * v1,
            u2 - q * v2,
            u3 - q * v3,
            v1,
            v2,
            v3,
        )
    return u1 % m
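# Sanity check for the extended-Euclid implementation above: 15 is the
# inverse of 7 modulo 26, since 7 * 15 == 105 == 4 * 26 + 1.
assert find_mod_inverse(7, 26) == 15
assert (7 * find_mod_inverse(7, 26)) % 26 == 1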
30
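A quick sanity check of the modular-inverse routine above, using the names from the restored snippet:

    # 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 modulo 26.
    assert find_mod_inverse(7, 26) == 15
    # gcd(4, 26) = 2 != 1, so no inverse exists and a ValueError is raised.
    try:
        find_mod_inverse(4, 26)
    except ValueError as err:
        print(err)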
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __a = None __a = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __a = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class __a: """simple docstring""" lowerCAmelCase = True lowerCAmelCase = None # Automatically constructed lowerCAmelCase = "PIL.Image.Image" lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a ) def __call__( self ) -> Tuple: return self.pa_type def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": value, "bytes": None} elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return {"path": None, "bytes": value} elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_SCREAMING_SNAKE_CASE ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: UpperCAmelCase_ : Dict = {} UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Dict = path.split('''::''' )[-1] try: UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id'''] UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE ) except ValueError: UpperCAmelCase_ : Optional[Any] = None with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f: UpperCAmelCase_ : List[str] = BytesIO(f.read() ) UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ ) else: UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: if pa.types.is_string(storage.type ): UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: UpperCAmelCase_ : Dict = storage.field('''bytes''' ) else: UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: UpperCAmelCase_ : int = storage.field('''path''' ) else: UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() ) elif pa.types.is_list(storage.type ): UpperCAmelCase_ : Optional[Any] = pa.array( [encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,) UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() ) UpperCAmelCase_ : Dict = pa.StructArray.from_arrays( [bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_SCREAMING_SNAKE_CASE ): with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f: UpperCAmelCase_ : Any = f.read() return bytes_ UpperCAmelCase_ : Union[str, Any] = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) UpperCAmelCase_ : List[str] = 
pa.array( [os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,) UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() ) return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type ) def lowerCamelCase__ ( ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = BytesIO() if image.format in list_image_compression_formats(): UpperCAmelCase_ : int = image.format else: UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(_lowercase , format=_lowercase ) return buffer.getvalue() def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if hasattr(_lowercase , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) UpperCAmelCase_ : Tuple = array.dtype UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER UpperCAmelCase_ : Dict = dtype.kind UpperCAmelCase_ : Union[str, Any] = dtype.itemsize UpperCAmelCase_ : Optional[Any] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCAmelCase_ : Tuple = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCAmelCase_ : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase ) UpperCAmelCase_ : str = np.dtype(_lowercase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) ) return {"path": None, "bytes": image_to_bytes(_lowercase )} def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase ) if isinstance(_lowercase , _lowercase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_lowercase , np.ndarray ): UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] elif isinstance(_lowercase , PIL.Image.Image ): UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase ) return [obj_to_image_dict_func(_lowercase ) for obj in objs] else: return objs else: return objs
30
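The encode/decode pair above boils down to a Pillow round trip; a standalone sketch of the same idea (this mirrors the intent of `encode_np_array` and the decode path, not the exact `datasets` internals):

    import io
    import numpy as np
    import PIL.Image

    array = np.zeros((4, 4, 3), dtype=np.uint8)           # multi-channel arrays must be |u1
    buffer = io.BytesIO()
    PIL.Image.fromarray(array).save(buffer, format="PNG")
    encoded = {"path": None, "bytes": buffer.getvalue()}  # the struct the feature stores

    decoded = PIL.Image.open(io.BytesIO(encoded["bytes"]))
    decoded.load()                                        # avoid "Too many open files" errors
    print(decoded.size, decoded.mode)                     # (4, 4) RGB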
1
import sys def matrix_chain_order(array ): '''simple docstring''' n = len(array ) matrix = [[0 for x in range(n )] for x in range(n )] sol = [[0 for x in range(n )] for x in range(n )] for chain_length in range(2 , n ): for a in range(1 , n - chain_length + 1 ): b = a + chain_length - 1 matrix[a][b] = sys.maxsize for c in range(a , b ): cost = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: matrix[a][b] = cost sol[a][b] = c return matrix, sol def print_optimal_solution(optimal_solution , i , j ): '''simple docstring''' if i == j: print('''A''' + str(i ) , end=''' ''' ) else: print('''(''' , end=''' ''' ) print_optimal_solution(optimal_solution , i , optimal_solution[i][j] ) print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j ) print(''')''' , end=''' ''' ) def main(): '''simple docstring''' array = [30, 35, 15, 5, 10, 20, 25] n = len(array ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 matrix, optimal_solution = matrix_chain_order(array ) print('''No. of operations required: ''' + str(matrix[1][n - 1] ) ) print_optimal_solution(optimal_solution , 1 , n - 1 ) if __name__ == "__main__": main()
30
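For the dimension list used in `main` above, the dynamic program lands on the classic textbook optimum; a standalone check with the restored names:

    matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    print(matrix[1][6])   # 15125 scalar multiplications
    # The optimal parenthesisation printed by print_optimal_solution is
    # ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).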
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> List[str]: UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(_SCREAMING_SNAKE_CASE ) from datasets import load_dataset UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' ) UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' ) UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = outputs.logits UpperCAmelCase_ : Tuple = torch.Size((1, 16) ) self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,) self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
30
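The integration test above is the usual image-classification inference recipe; a hedged, generic version (the model id is taken from the test, and `image` stands for any PIL RGB image you supply):

    import torch
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
    model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")

    inputs = processor(image, return_tensors="pt")        # `image`: a PIL.Image in RGB
    with torch.no_grad():
        logits = model(**inputs).logits                   # shape (1, 16): RVL-CDIP classes
    print(model.config.id2label[logits.argmax(-1).item()])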
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { 'configuration_conditional_detr': [ 'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConditionalDetrConfig', 'ConditionalDetrOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['ConditionalDetrFeatureExtractor'] __a = ['ConditionalDetrImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
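The `_LazyModule` trick above defers the heavy torch/vision imports until an attribute is first touched; a minimal standalone sketch of the same idea (not the actual transformers implementation):

    import importlib
    import types

    class LazyModule(types.ModuleType):
        # Resolve attributes from submodules on first access instead of at import time.
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._import_structure = import_structure

        def __getattr__(self, attr):
            for submodule, names in self._import_structure.items():
                if attr in names:
                    module = importlib.import_module(f"{self.__name__}.{submodule}")
                    return getattr(module, attr)
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")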
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __a = logging.get_logger(__name__) class __a( _a ): """simple docstring""" def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None: warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
30
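This is the standard deprecation shim: the old class name keeps working but emits a warning and inherits everything from its replacement. The general shape, with hypothetical names:

    import warnings

    class NewImageProcessor:
        def __init__(self, size=224):
            self.size = size

    class OldFeatureExtractor(NewImageProcessor):   # deprecated alias
        def __init__(self, *args, **kwargs):
            warnings.warn(
                "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
                FutureWarning,
            )
            super().__init__(*args, **kwargs)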
1
from scipy.stats import pearsonr import datasets __a = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' __a = '\nArgs:\n predictions (`list` of `float`): Predicted values, as returned by a model.\n references (`list` of `float`): Ground truth values.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' __a = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Optional[int]: if return_pvalue: UpperCAmelCase_ : List[str] = pearsonr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )[0] )}
30
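The metric is a thin wrapper over `scipy.stats.pearsonr`, so the docstring's numbers can be reproduced directly:

    from scipy.stats import pearsonr

    r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    print(round(r, 2), round(p, 2))   # -0.74 0.15, matching the metric card above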
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]: UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30} UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Any = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : List[str] = crop_pct UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Any = do_normalize UpperCAmelCase_ : str = image_mean UpperCAmelCase_ : List[Any] = image_std def a__ ( self ) -> str: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def a__ ( self ) -> Dict: UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self ) @property def a__ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> Dict: # Initialize image_processing UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> List[Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
30
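The `crop_pct` resize-then-center-crop these tests exercise works roughly as follows (a sketch of the usual timm-style recipe, not the exact HF implementation):

    from PIL import Image

    def resize_and_center_crop(image: Image.Image, size: int = 30, crop_pct: float = 0.9) -> Image.Image:
        # Scale the shortest edge to size / crop_pct, then cut a centred size x size patch.
        scale_size = int(size / crop_pct)                 # e.g. 30 / 0.9 -> 33
        ratio = scale_size / min(image.size)
        image = image.resize((round(image.width * ratio), round(image.height * ratio)))
        left = (image.width - size) // 2
        top = (image.height - size) // 2
        return image.crop((left, top, left + size, top + size))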
1
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class __a( _a ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class __a( _a ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
30
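Every guarded block above follows the same optional-dependency pattern: probe for the package, and substitute a dummy that fails loudly only when used. A minimal sketch (names illustrative):

    class OptionalDependencyNotAvailable(BaseException):
        pass

    def is_torch_available() -> bool:
        try:
            import torch  # noqa: F401
            return True
        except ImportError:
            return False

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        class StableDiffusionPipeline:                    # dummy stand-in
            def __init__(self, *args, **kwargs):
                raise ImportError("StableDiffusionPipeline requires torch")
    else:
        pass  # perform the real `from .pipeline_stable_diffusion import ...` here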
import unittest import numpy as np def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ): '''simple docstring''' UpperCAmelCase_ : Dict = np.shape(_lowercase ) UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase ) UpperCAmelCase_ : Tuple = np.shape(_lowercase ) if shape_a[0] != shape_b[0]: UpperCAmelCase_ : Tuple = ( '''Expected the same number of rows for A and B. ''' f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(_lowercase ) if shape_b[1] != shape_c[1]: UpperCAmelCase_ : List[Any] = ( '''Expected the same number of columns for B and C. ''' f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(_lowercase ) UpperCAmelCase_ : Dict = pseudo_inv if a_inv is None: try: UpperCAmelCase_ : Any = np.linalg.inv(_lowercase ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] ) UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE ) self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s ) def a__ ( self ) -> None: UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> None: UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
30
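The identity the first test checks is det(M) = det(A) * det(S) for the block matrix M = [[A, B], [B^T, C]] with Schur complement S = C - B^T A^{-1} B; a standalone NumPy check on the same matrices:

    import numpy as np

    a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]], dtype=float)
    b = np.array([[0, 3], [3, 0], [2, 3]], dtype=float)
    c = np.array([[2, 1], [6, 3]], dtype=float)

    s = c - b.T @ np.linalg.inv(a) @ b
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))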
1
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase="attention" ): '''simple docstring''' UpperCAmelCase_ : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) UpperCAmelCase_ : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCAmelCase_ : Tuple = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) UpperCAmelCase_ : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCAmelCase_ : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) UpperCAmelCase_ : List[Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCAmelCase_ : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) UpperCAmelCase_ : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=False ): '''simple docstring''' if split_mlp_wi: UpperCAmelCase_ : Any = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] UpperCAmelCase_ : str = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] UpperCAmelCase_ : int = (wi_a, wi_a) else: UpperCAmelCase_ : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] UpperCAmelCase_ : Optional[int] = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowerCamelCase__ ( _lowercase , *, _lowercase , _lowercase , _lowercase = False ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = traverse_util.flatten_dict(variables['''target'''] ) UpperCAmelCase_ : Optional[Any] = {'''/'''.join(_lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase_ : List[str] = '''encoder/encoder/mlp/wi_0/kernel''' in old print('''Split MLP:''' , _lowercase ) UpperCAmelCase_ : int = collections.OrderedDict() # Shared embeddings. UpperCAmelCase_ : Optional[Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). UpperCAmelCase_ : str = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_attention_layer_norm''' ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[str] = tax_attention_lookup(_lowercase , _lowercase , '''encoder''' , '''attention''' ) UpperCAmelCase_ : Optional[Any] = layer_norm UpperCAmelCase_ : str = k.T UpperCAmelCase_ : Tuple = o.T UpperCAmelCase_ : Union[str, Any] = q.T UpperCAmelCase_ : Dict = v.T # Block i, layer 1 (MLP). 
UpperCAmelCase_ : str = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase_, UpperCAmelCase_ : Dict = tax_mlp_lookup(_lowercase , _lowercase , '''encoder''' , _lowercase ) UpperCAmelCase_ : List[Any] = layer_norm if split_mlp_wi: UpperCAmelCase_ : int = wi[0].T UpperCAmelCase_ : str = wi[1].T else: UpperCAmelCase_ : Optional[int] = wi.T UpperCAmelCase_ : List[Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase_ : List[str] = tax_relpos_bias_lookup( _lowercase , _lowercase , '''encoder''' ).T UpperCAmelCase_ : Optional[int] = old['''encoder/encoder_norm/scale'''] if not scalable_attention: UpperCAmelCase_ : str = tax_relpos_bias_lookup( _lowercase , 0 , '''encoder''' ).T UpperCAmelCase_ : Optional[int] = tax_relpos_bias_lookup( _lowercase , 0 , '''decoder''' ).T if not is_encoder_only: # Decoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). UpperCAmelCase_ : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_self_attention_layer_norm''' ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[str] = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''self_attention''' ) UpperCAmelCase_ : Union[str, Any] = layer_norm UpperCAmelCase_ : str = k.T UpperCAmelCase_ : Dict = o.T UpperCAmelCase_ : Optional[int] = q.T UpperCAmelCase_ : Union[str, Any] = v.T # Block i, layer 1 (Cross Attention). UpperCAmelCase_ : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_cross_attention_layer_norm''' ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''encoder_decoder_attention''' ) UpperCAmelCase_ : Tuple = layer_norm UpperCAmelCase_ : int = k.T UpperCAmelCase_ : Optional[Any] = o.T UpperCAmelCase_ : Tuple = q.T UpperCAmelCase_ : Union[str, Any] = v.T # Block i, layer 2 (MLP). UpperCAmelCase_ : Optional[int] = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = tax_mlp_lookup(_lowercase , _lowercase , '''decoder''' , _lowercase ) UpperCAmelCase_ : List[Any] = layer_norm if split_mlp_wi: UpperCAmelCase_ : int = wi[0].T UpperCAmelCase_ : Any = wi[1].T else: UpperCAmelCase_ : Any = wi.T UpperCAmelCase_ : Dict = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase_ : List[str] = tax_relpos_bias_lookup(_lowercase , _lowercase , '''decoder''' ).T UpperCAmelCase_ : List[str] = old['''decoder/decoder_norm/scale'''] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase_ : Optional[Any] = old['''decoder/logits_dense/kernel'''].T return new def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase_ : Tuple = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase_ : Any = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print('''Using shared word embeddings as lm_head.''' ) UpperCAmelCase_ : Tuple = state_dict['''shared.weight'''] return state_dict def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : int = checkpoints.load_tax_checkpoint(_lowercase ) UpperCAmelCase_ : List[Any] = convert_tax_to_pytorch( _lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase , scalable_attention=_lowercase ) UpperCAmelCase_ : Optional[Any] = make_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase , strict=_lowercase ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = False , _lowercase = False , ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = MTaConfig.from_json_file(_lowercase ) print(f'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase_ : Any = UMTaEncoderModel(_lowercase ) else: UpperCAmelCase_ : List[Any] = UMTaForConditionalGeneration(_lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(_lowercase ) print('''Done''' ) if __name__ == "__main__": __a = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) __a = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
30
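The attention-kernel reshapes in the conversion above collapse T5X's (d_model, num_heads, head_dim) layout into the 2-D weight PyTorch expects, then transpose; the same operation in isolation (shapes are illustrative):

    import numpy as np

    d_model, num_heads, head_dim = 512, 8, 64
    k_t5x = np.random.randn(d_model, num_heads, head_dim)   # per-head kernel as stored by T5X
    k_flat = k_t5x.reshape(d_model, num_heads * head_dim)   # (512, 512)
    k_torch = k_flat.T                                      # nn.Linear stores (out_features, in_features)
    print(k_torch.shape)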
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def base64_encode(data ): '''simple docstring''' if not isinstance(data , bytes ): msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(msg ) binary_stream = ''''''.join(bin(byte )[2:].zfill(8 ) for byte in data ) padding_needed = len(binary_stream ) % 6 != 0 if padding_needed: # The padding that will be added later padding = B'''=''' * ((6 - len(binary_stream ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(binary_stream ) % 6) else: padding = B''''''  # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(binary_stream ) , 6 ) ).encode() + padding ) def base64_decode(encoded_data ): '''simple docstring''' if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ): msg = ( '''argument should be a bytes-like object or ASCII string, ''' f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(msg ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(encoded_data , bytes ): try: encoded_data = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) padding = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one encoded_data = encoded_data[:-padding] binary_stream = ''''''.join( bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: binary_stream = ''''''.join( bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data ) data = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(binary_stream ) , 8 ) ] return bytes(data ) if __name__ == "__main__": import doctest doctest.testmod()
30
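The easiest correctness check for the hand-rolled codec above is agreement with the standard library:

    import base64

    data = b"Hi"
    assert base64_encode(data) == base64.b64encode(data) == b"SGk="
    assert base64_decode("SGk=") == data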
1
from __future__ import annotations Matrix = list[list[int]] # assigning initial values to the grid initial_grid: Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution no_solution: Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def is_safe(grid , row , column , n ): '''simple docstring''' for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def find_empty_location(grid ): '''simple docstring''' for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def sudoku(grid ): '''simple docstring''' if location := find_empty_location(grid ): row, column = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(grid , row , column , digit ): grid[row][column] = digit if sudoku(grid ) is not None: return grid grid[row][column] = 0 return None def print_solution(grid ): '''simple docstring''' for row in grid: for cell in row: print(cell , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('\nExample grid:\n' + '=' * 20) print_solution(example_grid) print('\nExample grid solution:') solution = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('Cannot find a solution.')
30
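A quick standalone probe of the backtracking helpers above, using the restored names:

    grid = [row[:] for row in initial_grid]     # work on a copy
    print(find_empty_location(grid))            # (0, 1): the first blank cell
    print(is_safe(grid, 0, 1, 1))               # True: no 1 in row 0, column 1, or its 3x3 box
    print(is_safe(grid, 0, 1, 3))               # False: row 0 already contains a 3
    solved = sudoku(grid)
    assert solved is not None and find_empty_location(solved) is None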
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 0 @slow def a__ ( self ) -> Any: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,20 ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Check that tokenizer_type ≠ model_type UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size ,12 ) def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) 
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) ) UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) ) UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: with pytest.raises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' ) @require_tokenizers def a__ ( self ) -> Optional[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) else: self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) @require_tokenizers def a__ ( self ) -> List[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,): UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def a__ ( self ) -> Optional[Any]: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai UpperCAmelCase_ : int = TOKENIZER_MAPPING.values() UpperCAmelCase_ : List[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Tuple: self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE ) @require_tokenizers def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = '''Hello, world. 
How are you?''' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertEqual('''[UNK]''' ,tokens[0] ) @require_tokenizers def a__ ( self ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.model_max_length ,512 ) self.assertEqual(tokenizer.vocab_size ,30_000 ) self.assertEqual(tokenizer.unk_token ,'''[UNK]''' ) self.assertEqual(tokenizer.padding_side ,'''right''' ) self.assertEqual(tokenizer.truncation_side ,'''right''' ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size ,12 ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: # Check we can load the tokenizer config of an online model. UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' ) def a__ ( self ) -> str: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def a__ ( self ) -> int: try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) # Can register in two steps AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> Optional[int]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' ) @require_tokenizers def a__ ( self ) -> Optional[int]: class __a( _a ): """simple docstring""" lowerCAmelCase = False class __a( _a ): """simple docstring""" lowerCAmelCase = NewTokenizer lowerCAmelCase = False try: AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> int: UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' ) def a__ ( self ) -> Optional[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' ) def a__ ( self ) -> Any: # Make sure we have cached the tokenizer. UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count ,0 ) self.assertEqual(counter.head_request_count ,1 ) self.assertEqual(counter.other_request_count ,0 )
30
1
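# A minimal usage sketch of the local-file loading pattern exercised by the tests
# above: copy a vocab into a temporary directory and load it by explicit
# tokenizer type. Assumes `transformers` is installed and that
# ./tests/fixtures/vocab.txt exists, as in the tests.
import os
import shutil
import tempfile

from transformers import AutoTokenizer

with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
    print(type(tokenizer).__name__)  # BertTokenizerFast (or BertTokenizer if `tokenizers` is absent)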
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels __a = object() # For specifying empty leaf dict `{}` __a = object() def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(_lowercase ) - len(_lowercase ) + 1 ): UpperCAmelCase_ : List[Any] = [x.match(_lowercase ) for x, y in zip(_lowercase , ks[i:] )] if matches and all(_lowercase ): return True return False def lowerCamelCase__ ( _lowercase ): '''simple docstring''' def replace(_lowercase , _lowercase ): for rule, replacement in rules: if _match(_lowercase , _lowercase ): return replacement return val return replace def lowerCamelCase__ ( ): '''simple docstring''' return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , _lowercase )), (("transformer", "wte", "embedding"), P('''mp''' , _lowercase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_lowercase , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , _lowercase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(_lowercase , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , _lowercase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = _get_partition_rules() UpperCAmelCase_ : List[str] = _replacement_rules(_lowercase ) UpperCAmelCase_ : Dict = {k: _unmatched for k in flatten_dict(_lowercase )} UpperCAmelCase_ : Optional[Any] = {k: replace(_lowercase , _lowercase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(_lowercase ) )
30
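# A self-contained sketch of the windowed regex matching that drives the
# partition rules above (names here are illustrative, not the file's own): a
# rule such as ("attention", "(q_proj|k_proj|v_proj)", "kernel") matches any
# contiguous sub-sequence of a flattened parameter key.
import re


def match_rule(qs, ks):
    patterns = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(patterns) + 1):
        matches = [p.match(k) for p, k in zip(patterns, ks[i:])]
        if matches and all(matches):
            return True
    return False


key = ("transformer", "h", "0", "attention", "q_proj", "kernel")
print(match_rule(("attention", "(q_proj|k_proj|v_proj)", "kernel"), key))  # True
print(match_rule(("mlp", "c_fc", "kernel"), key))  # False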
from functools import reduce

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def solution(n: str = N) -> int:
    '''Return the greatest product of thirteen adjacent digits in `n`.'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
30
1
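# A plain-loop cross-check of `solution` above: same thirteen-digit windows,
# multiplying digits directly instead of through the string-based reduce.
def greatest_product(digits: str, span: int = 13) -> int:
    best = 0
    for i in range(len(digits) - span + 1):
        product = 1
        for ch in digits[i : i + span]:
            product *= int(ch)
        best = max(best, product)
    return best


assert greatest_product(N) == solution()
print(greatest_product(N))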
import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor __a = logging.get_logger(__name__) class __a( _a ): """simple docstring""" def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None: warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
30
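# The deprecation-shim pattern above in miniature (hedged: standard library
# only, with illustrative class names): the old name keeps working but emits a
# FutureWarning pointing at its replacement.
import warnings


class NewImageProcessor:
    pass


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated. Please use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


OldFeatureExtractor()  # constructs fine, but a FutureWarning is emitted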
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    '''Chudnovsky algorithm: return the first `precision` digits of pi as a string.'''
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term contributes ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
30
1
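# Quick sanity check for `pi` above: each extra series term adds roughly 14
# correct digits, so even small precisions line up with the familiar constant
# (the very last digit may differ by rounding).
print(pi(20))  # expected to start 3.141592653589793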
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]


def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue: it has seen 7 days of inactivity since the bot's mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )


if __name__ == "__main__":
    main()
30
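# The staleness test above in isolation (hedged: pure datetime arithmetic, no
# GitHub API): an issue untouched for more than 23 days and older than 30 days
# would receive the stale comment.
from datetime import datetime, timedelta

now = datetime.utcnow()
updated_at = now - timedelta(days=25)
created_at = now - timedelta(days=40)

days_since_updated = (now - updated_at).days
days_since_creation = (now - created_at).days
print(days_since_updated > 23 and days_since_creation >= 30)  # True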
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
30
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __a = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __a = concatenate_datasets __a = DownloadConfig __a = DownloadManager __a = DownloadMode __a = DownloadConfig __a = DownloadMode __a = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
30
1
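# Why the gate above goes through `packaging.version` rather than comparing raw
# strings: version strings do not order correctly as plain text.
from packaging import version

print(version.parse("3.10.1") > version.parse("3.7"))  # True, 3.10 is newer than 3.7
print("3.10.1" > "3.7")                                # False under naive string comparison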
import numpy as np import datasets __a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' __a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' __a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a( datasets.Metric ): """simple docstring""" def a__ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ), } ) ,) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # convert to numpy arrays UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = np.cov(reference_distribution.T ) try: UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE ) except np.linalg.LinAlgError: UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
30
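# A hand-computed counterpart of the metric above, matching its docstring
# example: for reference points [0, 1] and [1, 0], the query [0, 1] is 0.5 away.
# The tiny sample makes the covariance singular, hence the pseudo-inverse.
import numpy as np

X = np.array([[0.0, 1.0]])
reference = np.array([[0.0, 1.0], [1.0, 0.0]])

delta = X - np.mean(reference)
inv_cov = np.linalg.pinv(np.cov(reference.T))
print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # [0.5]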
def gcd(a: int, b: int) -> int:
    '''Greatest common divisor via the Euclidean algorithm.'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''Modular inverse of `a` modulo `m` via the extended Euclidean algorithm.'''
    if gcd(a, m) != 1:
        raise ValueError(f'''mod inverse of {a!r} and {m!r} does not exist''')
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
30
1
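# Quick check of `find_mod_inverse` above: the inverse of 7 modulo 26 is 15,
# since (7 * 15) % 26 == 1.
inverse = find_mod_inverse(7, 26)
print(inverse)             # 15
print((7 * inverse) % 26)  # 1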
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]: UpperCAmelCase_ : str = 1.0 if scale is None else scale UpperCAmelCase_ : Any = 0.0 if loc is None else loc super().__init__(_SCREAMING_SNAKE_CASE ,[AffineTransform(loc=self.loc ,scale=self.scale ,event_dim=_SCREAMING_SNAKE_CASE )] ) @property def a__ ( self ) -> Any: return self.base_dist.mean * self.scale + self.loc @property def a__ ( self ) -> Optional[Any]: return self.base_dist.variance * self.scale**2 @property def a__ ( self ) -> int: return self.variance.sqrt() class __a( nn.Module ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None: super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = args_dim UpperCAmelCase_ : int = nn.ModuleList([nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) UpperCAmelCase_ : Optional[int] = domain_map def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tuple[torch.Tensor]: UpperCAmelCase_ : Optional[int] = [proj(_SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*_SCREAMING_SNAKE_CASE ) class __a( nn.Module ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: super().__init__() UpperCAmelCase_ : Tuple = function def a__ ( self ,_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ) -> Tuple: return self.function(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ) class __a: """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 def __init__( self ,_SCREAMING_SNAKE_CASE = 1 ) -> None: UpperCAmelCase_ : int = dim UpperCAmelCase_ : Union[str, Any] = {k: dim * self.args_dim[k] for k in self.args_dim} def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: if self.dim == 1: return self.distribution_class(*_SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*_SCREAMING_SNAKE_CASE ) ,1 ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> Distribution: UpperCAmelCase_ : str = self._base_distribution(_SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(_SCREAMING_SNAKE_CASE ,loc=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,event_dim=self.event_dim ) @property def a__ ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def a__ ( self ) -> int: return len(self.event_shape ) @property def a__ ( self ) -> float: return 0.0 def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> nn.Module: return ParameterProjection( in_features=_SCREAMING_SNAKE_CASE ,args_dim=self.args_dim ,domain_map=LambdaLayer(self.domain_map ) ,) def a__ ( self ,*_SCREAMING_SNAKE_CASE ) -> Any: raise NotImplementedError() @staticmethod def a__ ( _SCREAMING_SNAKE_CASE ) -> torch.Tensor: return (x + torch.sqrt(torch.square(_SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class __a( _a ): """simple docstring""" lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} lowerCAmelCase = StudentT @classmethod def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: UpperCAmelCase_ : Optional[Any] = 
cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ : Union[str, Any] = 2.0 + cls.squareplus(_SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __a( _a ): """simple docstring""" lowerCAmelCase = {"loc": 1, "scale": 1} lowerCAmelCase = Normal @classmethod def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]: UpperCAmelCase_ : Tuple = cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __a( _a ): """simple docstring""" lowerCAmelCase = {"total_count": 1, "logits": 1} lowerCAmelCase = NegativeBinomial @classmethod def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = cls.squareplus(_SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Distribution: UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = distr_args if self.dim == 1: return self.distribution_class(total_count=_SCREAMING_SNAKE_CASE ,logits=_SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=_SCREAMING_SNAKE_CASE ,logits=_SCREAMING_SNAKE_CASE ) ,1 ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ) -> Distribution: UpperCAmelCase_, UpperCAmelCase_ : Dict = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
30
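# The `squareplus` map used by the heads above, checked in isolation: it is a
# smooth alternative to softplus, (x + sqrt(x^2 + 4)) / 2, whose output is
# strictly positive, which is what keeps `scale`, `df` and `total_count` valid.
import torch

x = torch.tensor([-3.0, 0.0, 3.0])
print((x + torch.sqrt(torch.square(x) + 4.0)) / 2.0)  # tensor([0.3028, 1.0000, 3.3028])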
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __a( _a ): """simple docstring""" lowerCAmelCase = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.''' ) lowerCAmelCase = '''CIDAS/clipseg-rd64-refined''' lowerCAmelCase = '''image_segmenter''' lowerCAmelCase = CLIPSegForImageSegmentation lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''image'''] def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict: requires_backends(self ,['''vision'''] ) super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str: with torch.no_grad(): UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits return logits def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : Dict = outputs.cpu().detach().numpy() UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : List[Any] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
30
1
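# The decode step above, isolated (hedged: random numbers stand in for
# `model(**inputs).logits`): threshold at zero and convert to a PIL mask.
import numpy as np
from PIL import Image

array = np.random.randn(64, 64)  # stand-in for the CLIPSeg logits
array[array <= 0] = 0
array[array > 0] = 1
mask = Image.fromarray((array * 255).astype(np.uint8))
print(mask.size, mask.mode)  # (64, 64) L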
from collections.abc import Sequence from queue import Queue class __a: """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ) -> Tuple: UpperCAmelCase_ : Optional[int] = start UpperCAmelCase_ : Optional[int] = end UpperCAmelCase_ : Any = val UpperCAmelCase_ : Tuple = (start + end) // 2 UpperCAmelCase_ : int = left UpperCAmelCase_ : Dict = right def __repr__( self ) -> Union[str, Any]: return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class __a: """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: UpperCAmelCase_ : int = collection UpperCAmelCase_ : int = function if self.collection: UpperCAmelCase_ : Optional[Any] = self._build_tree(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: self._update_tree(self.root ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: return self._query_range(self.root ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: if start == end: return SegmentTreeNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.collection[start] ) UpperCAmelCase_ : Optional[int] = (start + end) // 2 UpperCAmelCase_ : str = self._build_tree(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = self._build_tree(mid + 1 ,_SCREAMING_SNAKE_CASE ) return SegmentTreeNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.fn(left.val ,right.val ) ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: if node.start == i and node.end == i: UpperCAmelCase_ : Optional[Any] = val return if i <= node.mid: self._update_tree(node.left ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else: self._update_tree(node.right ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = self.fn(node.left.val ,node.right.val ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left ,_SCREAMING_SNAKE_CASE ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,_SCREAMING_SNAKE_CASE ) ,) else: # range in right child tree return self._query_range(node.right ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: if self.root is not None: UpperCAmelCase_ : int = Queue() queue.put(self.root ) while not queue.empty(): UpperCAmelCase_ : Tuple = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) __a = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
30
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __a = logging.get_logger(__name__) __a = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __a( _a ): """simple docstring""" lowerCAmelCase = '''gptj''' lowerCAmelCase = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=50_400 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=28 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="gelu_new" ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=50_256 ,_SCREAMING_SNAKE_CASE=50_256 ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : str = n_positions UpperCAmelCase_ : Dict = n_embd UpperCAmelCase_ : Tuple = n_layer UpperCAmelCase_ : Any = n_head UpperCAmelCase_ : Dict = n_inner UpperCAmelCase_ : Dict = rotary_dim UpperCAmelCase_ : Any = activation_function UpperCAmelCase_ : List[Any] = resid_pdrop UpperCAmelCase_ : Tuple = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Optional[int] = layer_norm_epsilon UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = use_cache UpperCAmelCase_ : Dict = bos_token_id UpperCAmelCase_ : Union[str, Any] = eos_token_id super().__init__( bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "default" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,) -> Dict: super().__init__(_SCREAMING_SNAKE_CASE ,task=_SCREAMING_SNAKE_CASE ,patching_specs=_SCREAMING_SNAKE_CASE ,use_past=_SCREAMING_SNAKE_CASE ) if not getattr(self._config ,'''pad_token_id''' ,_SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
UpperCAmelCase_ : Any = 0 @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE ,direction='''inputs''' ) UpperCAmelCase_ : List[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCAmelCase_ : int = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def a__ ( self ) -> int: return self._config.n_layer @property def a__ ( self ) -> int: return self._config.n_head def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = super(_SCREAMING_SNAKE_CASE ,self ).generate_dummy_inputs( _SCREAMING_SNAKE_CASE ,batch_size=_SCREAMING_SNAKE_CASE ,seq_length=_SCREAMING_SNAKE_CASE ,is_pair=_SCREAMING_SNAKE_CASE ,framework=_SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() UpperCAmelCase_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase_, UpperCAmelCase_ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase_ : int = seqlen + 2 UpperCAmelCase_ : Optional[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCAmelCase_ : Optional[Any] = [ (torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] UpperCAmelCase_ : List[Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCAmelCase_ : List[str] = ordered_inputs['''attention_mask'''].dtype UpperCAmelCase_ : Tuple = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE )] ,dim=1 ) return ordered_inputs @property def a__ ( self ) -> int: return 13
30
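# The `attribute_map` above in action (hedged: assumes the released
# `transformers.GPTJConfig`, which this configuration mirrors): generic config
# names transparently resolve to the GPT-J-specific ones.
from transformers import GPTJConfig

config = GPTJConfig()
print(config.hidden_size == config.n_embd)                   # True
print(config.num_hidden_layers, config.num_attention_heads)  # 28 16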
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __a = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __a = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 
'start_index', 'end_index', 'text'] ) __a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_a ) class __a: """simple docstring""" def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( _SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) elif titles is None or texts is None: UpperCAmelCase_ : List[str] = titles if texts is None else texts return super().__call__( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles] UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts] UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' ) UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : Optional[int] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: UpperCAmelCase_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : Dict = attention_mask 
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = reader_input['''input_ids'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = [] for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) UpperCAmelCase_ : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_a ) class __a( _a , _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = ['''input_ids''', '''attention_mask''']
30
1
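The span-selection step in `_get_best_spans` above, in isolation: score every candidate (start, end) pair within the answer-length budget, sort by total logit, and keep the top spans that are not nested inside (or around) an already chosen one. A minimal standalone sketch with illustrative names, not the tokenizer's API:

def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=3):
    # score every candidate span of at most max_answer_length tokens
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # skip spans nested inside (or containing) an already selected span,
        # mirroring the containment check in the code above
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]))  # [(1, 2), (0, 0)]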
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __a = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , ): '''simple docstring''' output_path.parent.mkdir(parents=_lowercase , exist_ok=_lowercase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _lowercase , _lowercase , f=output_path.as_posix() , input_names=_lowercase , output_names=_lowercase , dynamic_axes=_lowercase , do_constant_folding=_lowercase , use_external_data_format=_lowercase , enable_onnx_checker=_lowercase , opset_version=_lowercase , ) else: export( _lowercase , _lowercase , f=output_path.as_posix() , input_names=_lowercase , output_names=_lowercase , dynamic_axes=_lowercase , do_constant_folding=_lowercase , opset_version=_lowercase , ) @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = False ): '''simple docstring''' UpperCAmelCase_ : Any = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): UpperCAmelCase_ : Tuple = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: UpperCAmelCase_ : Optional[int] = '''cpu''' UpperCAmelCase_ : List[str] = Path(_lowercase ) # VAE DECODER UpperCAmelCase_ : Optional[Any] = AutoencoderKL.from_pretrained(model_path + '''/vae''' ) UpperCAmelCase_ : Union[str, Any] = vae_decoder.config.latent_channels # forward only through the decoder part UpperCAmelCase_ : Optional[Any] = vae_decoder.decode onnx_export( _lowercase , model_args=( torch.randn(1 , _lowercase , 25 , 25 ).to(device=_lowercase , dtype=_lowercase ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=_lowercase , ) del vae_decoder if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') __a = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('SD: Done: ONNX')
30
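A quick smoke test for the exported decoder. This is a sketch assuming `onnxruntime` is installed, the script was run with `--output_path ./sd_onnx`, and the checkpoint uses 4 latent channels (typical for Stable Diffusion VAEs); the path and shapes are illustrative:

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("./sd_onnx/vae_decoder/model.onnx")
# "latent_sample" matches the exporter's ordered_input_names above
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(None, {"latent_sample": latents})
print(sample.shape)  # decoded image, e.g. (1, 3, 200, 200) after 8x upsampling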
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
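The `_LazyModule` registered above defers the heavy torch-dependent imports until an attribute is actually requested. A stripped-down illustration of that idea (not the real transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the submodule only on first access to one of its names
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)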
import argparse CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js' def update_custom_js(version): '''Update the stable version and the version table in custom.js.''' with open(CUSTOM_JS_FILE , encoding='''utf-8''' , newline='''\n''' ) as f: lines = f.readlines() index = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 lines[index] = f'''const stableVersion = "v{version}"\n''' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += f''' "v{version}": "v{version}",\n''' with open(CUSTOM_JS_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--version', help='Release version.') parser = parser.parse_args() if False else parser args = parser.parse_args() update_custom_js(args.version)
30
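The same splice, demonstrated on an in-memory copy of the file so it can run without a repository checkout (the JS snippet is illustrative, not the real custom.js):

lines = [
    'const stableVersion = "v4.29.0"\n',
    'const versionMapping = {\n',
    '    "": "doc",\n',
    '    "v4.29.0": "v4.29.0",\n',
    '}\n',
]
version = "4.30.0"
index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'
while not lines[index].startswith("const versionMapping = {"):
    index += 1
while not lines[index].startswith("}"):
    index += 1
lines[index - 1] += f'    "v{version}": "v{version}",\n'
print("".join(lines))  # stableVersion bumped and a new mapping entry appended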
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __a( _a ): """simple docstring""" lowerCAmelCase = '''wav2vec2''' def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : List[Any] = feat_extract_activation UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = conv_bias UpperCAmelCase_ : str = num_conv_pos_embeddings UpperCAmelCase_ : Any = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : str = hidden_dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Tuple = activation_dropout UpperCAmelCase_ : List[str] = feat_proj_dropout UpperCAmelCase_ : int = final_dropout UpperCAmelCase_ : Union[str, Any] = layerdrop UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Optional[int] = do_stable_layer_norm UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Tuple = mask_time_prob UpperCAmelCase_ : Optional[Any] = mask_time_length UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : str = mask_feature_length UpperCAmelCase_ : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group UpperCAmelCase_ : Any = num_codevector_groups UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature UpperCAmelCase_ : List[str] = feat_quantizer_dropout UpperCAmelCase_ : Dict = num_negatives UpperCAmelCase_ : List[str] = codevector_dim UpperCAmelCase_ : List[str] = proj_codevector_dim UpperCAmelCase_ : str = diversity_loss_weight # ctc loss UpperCAmelCase_ : List[Any] = ctc_loss_reduction UpperCAmelCase_ : List[str] = ctc_zero_infinity # adapter UpperCAmelCase_ : Optional[Any] = add_adapter UpperCAmelCase_ : Any = adapter_kernel_size UpperCAmelCase_ : Optional[int] = adapter_stride UpperCAmelCase_ : List[Any] = num_adapter_layers UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size UpperCAmelCase_ : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = xvector_output_dim @property def a__ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
30
1
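For scale, the defaults above reproduce the base architecture, and the property at the end of the class is what transformers exposes as `inputs_to_logits_ratio`: the total stride of the convolutional feature extractor. A short usage sketch (assumes `transformers` is installed):

from transformers import Wav2Vec2Config

config = Wav2Vec2Config()  # the defaults shown in the __init__ above
print(config.num_feat_extract_layers)  # 7 convolutional layers
print(config.inputs_to_logits_ratio)   # 320 == 5 * 2 * 2 * 2 * 2 * 2 * 2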
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=18 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,) -> Dict: UpperCAmelCase_ : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : List[str] = min_resolution UpperCAmelCase_ : Any = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Any = size UpperCAmelCase_ : str = do_normalize def a__ ( self ) -> Optional[int]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __a( _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = ImageGPTImageProcessor if is_vision_available() else None def a__ ( self ) -> int: UpperCAmelCase_ : Dict = ImageGPTImageProcessingTester(self ) @property def a__ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> Any: UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''clusters''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} ) UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} ) def a__ ( self ) -> int: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase_ : Tuple = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,obj[key] ) ) else: self.assertEqual(obj[key] ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Dict: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Any = os.path.join(_SCREAMING_SNAKE_CASE ,'''image_processor.json''' ) image_processor_first.to_json_file(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_json_file(_SCREAMING_SNAKE_CASE ).to_dict() UpperCAmelCase_ : int = 
image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Any: UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = self.image_processing_class.from_pretrained(_SCREAMING_SNAKE_CASE ).to_dict() UpperCAmelCase_ : Dict = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,_SCREAMING_SNAKE_CASE ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def a__ ( self ) -> str: pass def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) UpperCAmelCase_ : Optional[int] = Image.open(dataset[4]['''file'''] ) UpperCAmelCase_ : Any = Image.open(dataset[5]['''file'''] ) UpperCAmelCase_ : Union[str, Any] = [imagea, imagea] return images @require_vision @require_torch class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> List[str]: UpperCAmelCase_ : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) UpperCAmelCase_ : Any = prepare_images() # test non-batched UpperCAmelCase_ : Tuple = image_processing(images[0] ,return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1_024) ) UpperCAmelCase_ : str = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,_SCREAMING_SNAKE_CASE ) # test batched UpperCAmelCase_ : List[str] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1_024) ) UpperCAmelCase_ : List[str] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,_SCREAMING_SNAKE_CASE )
30
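The heart of the serialization tests above is that the numpy `clusters` palette survives a dict round-trip. A condensed sketch, assuming `transformers` with vision extras is installed; the tiny two-cluster palette mirrors the tester class:

import numpy as np
from transformers import ImageGPTImageProcessor

clusters = np.asarray([[0.5, 0.5, 0.5], [-0.5, -0.5, -0.5]])
processor = ImageGPTImageProcessor(
    clusters=clusters, do_resize=True, size=18, do_normalize=True
)
# to_dict() turns the array into plain lists so it is JSON-serializable
restored = ImageGPTImageProcessor.from_dict(processor.to_dict())
assert np.array_equal(np.asarray(restored.clusters), clusters)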
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
1
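The availability guards above mean the NLLB imports degrade gracefully depending on which backends are installed. A sketch of probing the same guards at runtime (assumes `transformers` is installed):

from transformers.utils import is_sentencepiece_available, is_tokenizers_available

if is_tokenizers_available():
    from transformers import NllbTokenizerFast as NllbTok
elif is_sentencepiece_available():
    from transformers import NllbTokenizer as NllbTok
else:
    raise ImportError("install `tokenizers` or `sentencepiece` for NLLB support")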
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : int = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) UpperCAmelCase_ : List[Any] = DatasetInfosDict.from_directory(_lowercase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = str(_lowercase ) dataset_info.write_to_directory(_lowercase ) UpperCAmelCase_ : int = DatasetInfo.from_directory(_lowercase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_lowercase , '''dataset_info.json''' ) ) def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) UpperCAmelCase_ : List[str] = dataset_info._to_yaml_dict() assert sorted(_lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) UpperCAmelCase_ : str = yaml.safe_dump(_lowercase ) UpperCAmelCase_ : List[Any] = yaml.safe_load(_lowercase ) assert dataset_info_yaml_dict == reloaded def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = DatasetInfo() UpperCAmelCase_ : Union[str, Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=1337 ), } ), ] , ) def lowerCamelCase__ 
( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = str(_lowercase ) dataset_infos_dict.write_to_directory(_lowercase ) UpperCAmelCase_ : Dict = DatasetInfosDict.from_directory(_lowercase ) # the config_name of the dataset_infos_dict takes precedence over the attribute for config_name, dataset_info in dataset_infos_dict.items(): UpperCAmelCase_ : List[str] = config_name # the YAML representation doesn't include fields like description or citation # so we just test that we can recover what we can from the YAML UpperCAmelCase_ : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_lowercase , '''README.md''' ) )
30
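The round-trip these tests assert, condensed into a few lines (assumes `datasets` is installed; the underscore-prefixed helpers are the same ones the tests themselves call):

from datasets import DatasetInfo, Features, Value

info = DatasetInfo(
    description="foo", features=Features({"a": Value("int32")}), dataset_size=42
)
reloaded = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
print(reloaded.dataset_size)  # 42
print(reloaded.description)   # '' -- description is not part of the YAML dict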
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict: UpperCAmelCase_ : List[Any] = tokenizer UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks UpperCAmelCase_ : Optional[int] = n_copies def __iter__( self ) -> Any: UpperCAmelCase_ : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generates commented code ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __a( _a ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : str = start_length UpperCAmelCase_ : Optional[int] = eof_strings UpperCAmelCase_ : str = tokenizer def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowercase ) ): with torch.no_grad(): UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1] UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase ) # each task is generated batch_size times UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase ) UpperCAmelCase_ : Dict = accelerator.pad_across_processes( _lowercase , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy() UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowercase , _lowercase ): gen_token_dict[task].append(_lowercase ) UpperCAmelCase_ : Union[str, Any] = [[] for _ in 
range(_lowercase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) code_gens[task].append(remove_last_block(_lowercase ) ) return code_gens def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase ) UpperCAmelCase_ : int = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ : List[Any] = '''false''' if args.num_workers is None: UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ : int = Accelerator() set_seed(args.seed , device_specific=_lowercase ) # Load model and tokenizer UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ : Any = tokenizer.eos_token UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ : str = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ), } # Load evaluation dataset and metric UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ : Dict = load_metric('''code_eval''' ) UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ : str = args.n_samples // args.batch_size UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase ) UpperCAmelCase_ : int = complete_code( _lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , ) if accelerator.is_main_process: UpperCAmelCase_ : Any = [] for task in tqdm(range(_lowercase ) ): UpperCAmelCase_ : int = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute( references=_lowercase , predictions=_lowercase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(_lowercase , _lowercase ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
30
1
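The post-processing step of the evaluation script in isolation: generation is cut at the first top-level continuation keyword so only the completed function body survives. A self-contained restatement with readable names (EOF_STRINGS mirrors the list at the top of the script):

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    # split on the first stop-sequence and drop it together with the tail
    parts = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(parts[:-2])

completion = "    return x + 1\ndef next_function():\n    pass"
print(remove_last_block(completion))  # "    return x + 1"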
import math import sys def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''''' try: with open(_lowercase , '''rb''' ) as binary_file: UpperCAmelCase_ : Dict = binary_file.read() for dat in data: UpperCAmelCase_ : Union[str, Any] = f'''{dat:08b}''' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = {'''0''': '''0''', '''1''': '''1'''} UpperCAmelCase_, UpperCAmelCase_ : str = '''''', '''''' UpperCAmelCase_ : Any = len(_lowercase ) for i in range(len(_lowercase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue UpperCAmelCase_ : Any = lexicon[curr_string] result += last_match_id UpperCAmelCase_ : Any = last_match_id + '''0''' if math.loga(_lowercase ).is_integer(): UpperCAmelCase_ : int = {} for curr_key in list(_lowercase ): UpperCAmelCase_ : List[Any] = lexicon.pop(_lowercase ) UpperCAmelCase_ : List[Any] = new_lex UpperCAmelCase_ : Any = last_match_id + '''1''' index += 1 UpperCAmelCase_ : List[Any] = '''''' return result def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : str = 8 try: with open(_lowercase , '''wb''' ) as opened_file: UpperCAmelCase_ : List[str] = [ to_write[i : i + byte_length] for i in range(0 , len(_lowercase ) , _lowercase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_lowercase , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 for letter in data_bits: if letter == "1": break counter += 1 UpperCAmelCase_ : Optional[int] = data_bits[counter:] UpperCAmelCase_ : Dict = data_bits[counter + 1 :] return data_bits def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = read_file_binary(_lowercase ) UpperCAmelCase_ : Optional[Any] = remove_prefix(_lowercase ) UpperCAmelCase_ : Tuple = decompress_data(_lowercase ) write_file_binary(_lowercase , _lowercase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
30
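The byte-to-bit formatting that `read_file_binary` performs, in two lines: every byte becomes a fixed-width 8-bit string (the `{dat:08b}` format above), so the compressed stream can be replayed bit by bit:

data = bytes([0b10110000, 0b00000011])
bits = "".join(f"{byte:08b}" for byte in data)
print(bits)  # 1011000000000011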
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __a = logging.get_logger(__name__) __a = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''imagegpt''' lowerCAmelCase = ['''past_key_values'''] lowerCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Union[str, Any] = n_positions UpperCAmelCase_ : Union[str, Any] = n_embd UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Optional[Any] = n_head UpperCAmelCase_ : Union[str, Any] = n_inner UpperCAmelCase_ : List[Any] = activation_function UpperCAmelCase_ : List[str] = resid_pdrop UpperCAmelCase_ : str = embd_pdrop UpperCAmelCase_ : Optional[Any] = attn_pdrop UpperCAmelCase_ : Dict = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Dict = scale_attn_weights UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx UpperCAmelCase_ : Tuple = reorder_and_upcast_attn UpperCAmelCase_ : int = tie_word_embeddings super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) class __a( _a ): """simple docstring""" @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]: UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) ) return inputs
30
1
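A usage sketch for the config above (assumes `transformers` is installed). The `+ 1` on the default vocab size is the start-of-sequence token added on top of the 512 colour clusters, and `n_positions` is the flattened 32 x 32 pixel grid:

from transformers import ImageGPTConfig

config = ImageGPTConfig()
print(config.vocab_size)   # 513 = 512 colour clusters + 1 start-of-sequence token
print(config.n_positions)  # 1024 = a flattened 32 x 32 pixel grid
print(config.hidden_size)  # 512, resolved through the attribute_map above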
import sys __a = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = 1 for digit in s: product *= int(_lowercase ) return product def lowerCamelCase__ ( _lowercase = N ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = -sys.maxsize - 1 UpperCAmelCase_ : Dict = n[:13] UpperCAmelCase_ : Optional[Any] = 13 while cur_index < len(_lowercase ) - 13: if int(n[cur_index] ) >= int(substr[0] ): UpperCAmelCase_ : Dict = substr[1:] + n[cur_index] cur_index += 1 else: UpperCAmelCase_ : List[str] = max(_lowercase , str_eval(_lowercase ) ) UpperCAmelCase_ : Tuple = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
30
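A brute-force cross-check for the windowed search above, useful when validating the skip-ahead heuristic; the short digit string is just the opening of the 1000-digit constant:

from math import prod

def brute_force(digits: str, k: int) -> int:
    # check every window directly; O(n * k) but obviously correct
    return max(
        prod(int(d) for d in digits[i : i + k])
        for i in range(len(digits) - k + 1)
    )

print(brute_force("73167176531330", 4))  # 630 = 7 * 6 * 5 * 3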
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __a = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] __a = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] ) layer_number -= 3 return f'''h.{layer_number}.''' + key def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if dtype == torch.bool: return 1 / 8 UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) ) if bit_search is None: raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' ) UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' if bloom_config_file == "": UpperCAmelCase_ : Tuple = BloomConfig() else: UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase ) if shard_model: UpperCAmelCase_ : Any = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}} UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = BloomConfig() for j, file in enumerate(_lowercase ): print('''Processing file: {}'''.format(_lowercase ) ) UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : Dict = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide the weights we want to average by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in 
WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp torch.save( _lowercase , os.path.join( _lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): UpperCAmelCase_ : Union[str, Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) UpperCAmelCase_ : List[Any] = BloomConfig() UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME UpperCAmelCase_ : List[str] = total_size with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n''' f.write(_lowercase ) else: UpperCAmelCase_ : Any = BloomModel(_lowercase ) UpperCAmelCase_ : Tuple = os.listdir(_lowercase ) UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) ) UpperCAmelCase_ : Any = None for i, file in enumerate(_lowercase ): UpperCAmelCase_ : Optional[Any] = None for i in range(_lowercase ): # load all TP files UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' ) UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' ) # Rename keys in the transformers names UpperCAmelCase_ : str = list(temp.keys() ) for key in keys: UpperCAmelCase_ : Dict = temp.pop(_lowercase ) if tensors is None: UpperCAmelCase_ : int = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights across TP ranks UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase ) # Divide the weights we want to average by the number of TP ranks for key in tensors.keys(): if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase ) assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys ) else: UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(_lowercase , exist_ok=_lowercase ) UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype ) 
torch.save(model.state_dict() , _lowercase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) __a = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
30
1
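The dtype-size helper the index builder relies on, restated with readable names so it can be exercised directly (a sketch; assumes torch is installed):

import re
import torch

def get_dtype_size(dtype):
    # bool packs to a single bit; everything else encodes its bit width in the name
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    return int(bit_search.groups()[0]) // 8

print(get_dtype_size(torch.float16))  # 2
print(get_dtype_size(torch.int64))    # 8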