Dataset schema (reconstructed from the viewer header; the rows follow below):

  column                   type    range
  code                     string  length 82 to 54.1k
  code_codestyle           int64   0 to 699
  style_context            string  length 111 to 35.6k
  style_context_codestyle  int64   0 to 699
  label                    int64   0 to 1
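For orientation, a minimal sketch of reading rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a placeholder, since the dump does not name the dataset:

# Minimal sketch, assuming the `datasets` library is installed.
# "user/code-style-dataset" is a hypothetical identifier -- the dump above
# does not say where this dataset lives.
from datasets import load_dataset

rows = load_dataset("user/code-style-dataset", split="train")
for row in rows.select(range(3)):
    # Each row pairs a code sample with a style context plus two style ids and a label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # first 80 characters of the code sample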
--- row 1 ---
code:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

UpperCamelCase : Optional[int] = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = ['BlenderbotTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[int] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 50
style_context:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

UpperCamelCase : Tuple = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : str = ['MvpTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[int] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 50
label: 1
--- row 2 ---
code:
'''simple docstring'''
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf

if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_(self, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase):
        self.assertEqual(len(_lowerCAmelCase), len(_lowerCAmelCase))
        for a, b in zip(_lowerCAmelCase, _lowerCAmelCase):
            self.assertAlmostEqual(_lowerCAmelCase, _lowerCAmelCase, delta=_lowerCAmelCase)

    def UpperCamelCase_(self):
        lowerCamelCase__ = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(_lowerCAmelCase):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2)

    def UpperCamelCase_(self):
        lowerCamelCase__ = None
        ops.enable_eager_execution_internal()
        lowerCamelCase__ = tf.config.list_physical_devices("""CPU""")
        if len(_lowerCAmelCase) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        lowerCamelCase__ = tf.config.list_logical_devices(device_type="""CPU""")
        lowerCamelCase__ = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            lowerCamelCase__ = GradientAccumulator()
            lowerCamelCase__ = tf.Variable([4.0, 3.0])
            lowerCamelCase__ , lowerCamelCase__ = create_optimizer(5E-5, 10, 5)
            lowerCamelCase__ = tf.Variable([0.0, 0.0], trainable=_lowerCAmelCase)

        def accumulate_on_replica(_lowerCAmelCase):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(_lowerCAmelCase, _lowerCAmelCase):
            with strategy.scope():
                lowerCamelCase__ = strategy.experimental_local_results(_lowerCAmelCase)
                local_variables[0].assign(_lowerCAmelCase)
                local_variables[1].assign(_lowerCAmelCase)
                strategy.run(_lowerCAmelCase, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(_lowerCAmelCase)

        def _check_local_values(_lowerCAmelCase, _lowerCAmelCase):
            lowerCamelCase__ = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), _lowerCAmelCase, tol=1E-2)
            self.assertListAlmostEqual(values[1].value(), _lowerCAmelCase, tol=1E-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
code_codestyle: 50
style_context:
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging

UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : Dict = {
    'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
    'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
    'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
    'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
    'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
    'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
    'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
    'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
    'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
    'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
    'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
    'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'codegen'
    _UpperCamelCase = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        _lowerCAmelCase=5_04_00,
        _lowerCAmelCase=20_48,
        _lowerCAmelCase=20_48,
        _lowerCAmelCase=40_96,
        _lowerCAmelCase=28,
        _lowerCAmelCase=16,
        _lowerCAmelCase=64,
        _lowerCAmelCase=None,
        _lowerCAmelCase="gelu_new",
        _lowerCAmelCase=0.0,
        _lowerCAmelCase=0.0,
        _lowerCAmelCase=0.0,
        _lowerCAmelCase=1E-5,
        _lowerCAmelCase=0.02,
        _lowerCAmelCase=True,
        _lowerCAmelCase=5_02_56,
        _lowerCAmelCase=5_02_56,
        _lowerCAmelCase=False,
        **_lowerCAmelCase,
    ):
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = n_ctx
        lowerCamelCase__ = n_positions
        lowerCamelCase__ = n_embd
        lowerCamelCase__ = n_layer
        lowerCamelCase__ = n_head
        lowerCamelCase__ = n_inner
        lowerCamelCase__ = rotary_dim
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = resid_pdrop
        lowerCamelCase__ = embd_pdrop
        lowerCamelCase__ = attn_pdrop
        lowerCamelCase__ = layer_norm_epsilon
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = bos_token_id
        lowerCamelCase__ = eos_token_id
        super().__init__(
            bos_token_id=_lowerCAmelCase, eos_token_id=_lowerCAmelCase, tie_word_embeddings=_lowerCAmelCase, **_lowerCAmelCase
        )


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__(self, _lowerCAmelCase, _lowerCAmelCase="default", _lowerCAmelCase=None, _lowerCAmelCase=False):
        super().__init__(_lowerCAmelCase, task=_lowerCAmelCase, patching_specs=_lowerCAmelCase, use_past=_lowerCAmelCase)
        if not getattr(self._config, """pad_token_id""", _lowerCAmelCase):
            # TODO: how to do that better?
            lowerCamelCase__ = 0

    @property
    def UpperCamelCase_(self):
        lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(_lowerCAmelCase, direction="""inputs""")
            lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def UpperCamelCase_(self):
        return self._config.n_layer

    @property
    def UpperCamelCase_(self):
        return self._config.n_head

    def UpperCamelCase_(self, _lowerCAmelCase, _lowerCAmelCase=-1, _lowerCAmelCase=-1, _lowerCAmelCase=False, _lowerCAmelCase=None):
        lowerCamelCase__ = super(_lowerCAmelCase, self).generate_dummy_inputs(
            _lowerCAmelCase, batch_size=_lowerCAmelCase, seq_length=_lowerCAmelCase, is_pair=_lowerCAmelCase, framework=_lowerCAmelCase
        )

        # We need to order the input in the way they appears in the forward()
        lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                lowerCamelCase__ = seqlen + 2
                lowerCamelCase__ = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                lowerCamelCase__ = [
                    (torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(self.num_layers)
                ]

        lowerCamelCase__ = common_inputs["""attention_mask"""]
        if self.use_past:
            lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
            lowerCamelCase__ = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase, _lowerCAmelCase, dtype=_lowerCAmelCase)], dim=1
            )

        return ordered_inputs

    @property
    def UpperCamelCase_(self):
        return 13
style_context_codestyle: 50
label: 1
--- row 3 ---
code:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

UpperCamelCase : Tuple = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Any = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 50
style_context:
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

UpperCamelCase : int = {
    'configuration_xmod': [
        'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XmodConfig',
        'XmodOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = [
        'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XmodForCausalLM',
        'XmodForMaskedLM',
        'XmodForMultipleChoice',
        'XmodForQuestionAnswering',
        'XmodForSequenceClassification',
        'XmodForTokenClassification',
        'XmodModel',
        'XmodPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 50
label: 1
--- row 4 ---
code:
'''simple docstring'''
def A__():
    for n in range(1, 100_0000):
        yield n * (n + 1) // 2


def A__(__lowerCAmelCase: Union[str, Any]):
    lowerCamelCase__ = 1
    lowerCamelCase__ = 2
    while i * i <= n:
        lowerCamelCase__ = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def A__():
    return next(i for i in triangle_number_generator() if count_divisors(__lowerCAmelCase) > 500)


if __name__ == "__main__":
    print(solution())
code_codestyle: 50
style_context:
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def A__(__lowerCAmelCase: str, __lowerCAmelCase: str = "cpu", __lowerCAmelCase: Union[str, None] = None):
    lowerCamelCase__ = torch.load(__lowerCAmelCase, map_location=__lowerCAmelCase)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(__lowerCAmelCase, torch.Tensor):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""")
        lowerCamelCase__ = v.half()
    if save_path is None:  # overwrite src_path
        lowerCamelCase__ = src_path
    torch.save(__lowerCAmelCase, __lowerCAmelCase)


if __name__ == "__main__":
    fire.Fire(convert)
style_context_codestyle: 50
label: 1
--- row 5 ---
code:
'''simple docstring'''
import argparse
import os
import re

import packaging.version

UpperCamelCase : List[Any] = 'examples/'
UpperCamelCase : int = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase : Any = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
UpperCamelCase : Any = 'README.md'


def A__(__lowerCAmelCase: Any, __lowerCAmelCase: Union[str, Any], __lowerCAmelCase: Optional[int]):
    with open(__lowerCAmelCase, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lowerCamelCase__ = f.read()
    lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
    lowerCamelCase__ = replace.replace("""VERSION""", __lowerCAmelCase)
    lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase, __lowerCAmelCase)
    with open(__lowerCAmelCase, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(__lowerCAmelCase)


def A__(__lowerCAmelCase: str):
    for folder, directories, fnames in os.walk(__lowerCAmelCase):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(__lowerCAmelCase, __lowerCAmelCase), __lowerCAmelCase, pattern="""examples""")


def A__(__lowerCAmelCase: Union[str, Any], __lowerCAmelCase: Optional[Any] = False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
    if not patch:
        update_version_in_examples(__lowerCAmelCase)


def A__():
    lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
    lowerCamelCase__ = """1. Want to contribute a new model?"""
    with open(__lowerCAmelCase, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lowerCamelCase__ = f.readlines()

    # Find the start of the list.
    lowerCamelCase__ = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    lowerCamelCase__ = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lowerCamelCase__ = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1

    with open(__lowerCAmelCase, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(__lowerCAmelCase)


def A__():
    with open(REPLACE_FILES["""init"""], """r""") as f:
        lowerCamelCase__ = f.read()
    lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase).groups()[0]
    return packaging.version.parse(__lowerCAmelCase)


def A__(__lowerCAmelCase: Union[str, Any] = False):
    lowerCamelCase__ = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        lowerCamelCase__ = default_version.base_version
    elif patch:
        lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''')
    if len(__lowerCAmelCase) == 0:
        lowerCamelCase__ = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(__lowerCAmelCase, patch=__lowerCAmelCase)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()


def A__():
    lowerCamelCase__ = get_version()
    lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    lowerCamelCase__ = current_version.base_version

    # Check with the user we got that right.
    lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(__lowerCAmelCase) == 0:
        lowerCamelCase__ = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(__lowerCAmelCase)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    UpperCamelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    UpperCamelCase : Any = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
code_codestyle: 50
style_context:
'''simple docstring'''
import os
from pathlib import Path


def A__():
    from torch.utils.cpp_extension import load

    lowerCamelCase__ = Path(__lowerCAmelCase).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    lowerCamelCase__ = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""", """ms_deform_attn_cpu.cpp"""),
            os.path.join("""cuda""", """ms_deform_attn_cuda.cu"""),
        ]
    ]

    load(
        """MultiScaleDeformableAttention""",
        __lowerCAmelCase,
        with_cuda=__lowerCAmelCase,
        extra_include_paths=[str(__lowerCAmelCase)],
        extra_cflags=["""-DWITH_CUDA=1"""],
        extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
style_context_codestyle: 50
label: 1
--- row 6 ---
code:
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase : int = logging.get_logger(__name__)

# TODO Update this
UpperCamelCase : Tuple = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'esm'

    def __init__(
        self,
        _lowerCAmelCase=None,
        _lowerCAmelCase=None,
        _lowerCAmelCase=None,
        _lowerCAmelCase=7_68,
        _lowerCAmelCase=12,
        _lowerCAmelCase=12,
        _lowerCAmelCase=30_72,
        _lowerCAmelCase=0.1,
        _lowerCAmelCase=0.1,
        _lowerCAmelCase=10_26,
        _lowerCAmelCase=0.02,
        _lowerCAmelCase=1E-12,
        _lowerCAmelCase="absolute",
        _lowerCAmelCase=True,
        _lowerCAmelCase=None,
        _lowerCAmelCase=False,
        _lowerCAmelCase=False,
        _lowerCAmelCase=None,
        _lowerCAmelCase=None,
        **_lowerCAmelCase,
    ):
        super().__init__(pad_token_id=_lowerCAmelCase, mask_token_id=_lowerCAmelCase, **_lowerCAmelCase)

        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = position_embedding_type
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = emb_layer_norm_before
        lowerCamelCase__ = token_dropout
        lowerCamelCase__ = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                lowerCamelCase__ = EsmFoldConfig()
            elif isinstance(_lowerCAmelCase, _lowerCAmelCase):
                lowerCamelCase__ = EsmFoldConfig(**_lowerCAmelCase)
            lowerCamelCase__ = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                lowerCamelCase__ = get_default_vocab_list()
            else:
                lowerCamelCase__ = vocab_list
        else:
            lowerCamelCase__ = None
            lowerCamelCase__ = None

        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", _lowerCAmelCase):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def UpperCamelCase_(self):
        lowerCamelCase__ = super().to_dict()
        if isinstance(self.esmfold_config, _lowerCAmelCase):
            lowerCamelCase__ = self.esmfold_config.to_dict()
        return output


@dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = None
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = 0
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = 128
    _UpperCamelCase = None

    def UpperCamelCase_(self):
        if self.trunk is None:
            lowerCamelCase__ = TrunkConfig()
        elif isinstance(self.trunk, _lowerCAmelCase):
            lowerCamelCase__ = TrunkConfig(**self.trunk)

    def UpperCamelCase_(self):
        lowerCamelCase__ = asdict(self)
        lowerCamelCase__ = self.trunk.to_dict()
        return output


@dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = 48
    _UpperCamelCase = 1024
    _UpperCamelCase = 128
    _UpperCamelCase = 32
    _UpperCamelCase = 32
    _UpperCamelCase = 32
    _UpperCamelCase = 0
    _UpperCamelCase = 0
    _UpperCamelCase = False
    _UpperCamelCase = 4
    _UpperCamelCase = 128
    _UpperCamelCase = None

    def UpperCamelCase_(self):
        if self.structure_module is None:
            lowerCamelCase__ = StructureModuleConfig()
        elif isinstance(self.structure_module, _lowerCAmelCase):
            lowerCamelCase__ = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''')
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.'''
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.'''
            )

        lowerCamelCase__ = self.sequence_state_dim // self.sequence_head_width
        lowerCamelCase__ = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.'''
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.'''
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')

        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def UpperCamelCase_(self):
        lowerCamelCase__ = asdict(self)
        lowerCamelCase__ = self.structure_module.to_dict()
        return output


@dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = 384
    _UpperCamelCase = 128
    _UpperCamelCase = 16
    _UpperCamelCase = 128
    _UpperCamelCase = 12
    _UpperCamelCase = 4
    _UpperCamelCase = 8
    _UpperCamelCase = 0.1
    _UpperCamelCase = 8
    _UpperCamelCase = 1
    _UpperCamelCase = 2
    _UpperCamelCase = 7
    _UpperCamelCase = 10
    _UpperCamelCase = 1e-8
    _UpperCamelCase = 1e5

    def UpperCamelCase_(self):
        return asdict(self)


def A__():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D",
        "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-",
        "<null_1>", "<mask>",
    )
code_codestyle: 50
style_context:
'''simple docstring'''
def A__(__lowerCAmelCase: list[int], __lowerCAmelCase: list[int]):
    lowerCamelCase__ = len(__lowerCAmelCase)
    print("""The following activities are selected:""")

    # The first activity is always selected
    lowerCamelCase__ = 0
    print(__lowerCAmelCase, end=""",""")

    # Consider rest of the activities
    for j in range(__lowerCAmelCase):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(__lowerCAmelCase, end=""",""")
            lowerCamelCase__ = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
    UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
style_context_codestyle: 50
label: 1
--- row 7 ---
code:
'''simple docstring'''
def A__(__lowerCAmelCase: List[Any]):
    lowerCamelCase__ = 1
    lowerCamelCase__ = 2
    while i * i <= n:
        lowerCamelCase__ = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def A__():
    lowerCamelCase__ = 1
    lowerCamelCase__ = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(__lowerCAmelCase) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
code_codestyle: 50
style_context:
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging

UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__(self, _lowerCAmelCase=None, **_lowerCAmelCase):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""",
            _lowerCAmelCase,
        )
        super().__init__(args=_lowerCAmelCase, **_lowerCAmelCase)
style_context_codestyle: 50
label: 1
--- row 8 ---
code:
'''simple docstring'''
import tempfile
import unittest

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)

if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_(self):
        lowerCamelCase__ = """hf-internal-testing/tiny-random-t5"""
        lowerCamelCase__ = AutoTokenizer.from_pretrained(_lowerCAmelCase)
        lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase)

        lowerCamelCase__ = tokenizer("""This is me""", return_tensors="""pt""")

        lowerCamelCase__ = model.to_bettertransformer()
        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules()))

        lowerCamelCase__ = model.generate(**_lowerCAmelCase)

        lowerCamelCase__ = model.reverse_bettertransformer()
        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowerCAmelCase)

            lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase)

            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            lowerCamelCase__ = model_reloaded.generate(**_lowerCAmelCase)
            self.assertTrue(torch.allclose(_lowerCAmelCase, _lowerCAmelCase))

    def UpperCamelCase_(self):
        lowerCamelCase__ = """hf-internal-testing/tiny-random-t5"""
        lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase)

        lowerCamelCase__ = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(_lowerCAmelCase):
                model.save_pretrained(_lowerCAmelCase)

            lowerCamelCase__ = model.reverse_bettertransformer()
            model.save_pretrained(_lowerCAmelCase)
code_codestyle: 50
style_context:
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def A__(__lowerCAmelCase: List[str]):
    lowerCamelCase__ = []
    for line in lines:
        lowerCamelCase__ = re.sub(R"""#.*""", """""", __lowerCAmelCase)  # remove comments
        if line:
            filtered_lines.append(__lowerCAmelCase)
    lowerCamelCase__ = """\n""".join(__lowerCAmelCase)
    # Make a hash from all this code
    lowerCamelCase__ = full_str.encode("""utf-8""")
    return shaaaa(__lowerCAmelCase).hexdigest()


# get importable module names and hash for caching
UpperCamelCase : Dict = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
UpperCamelCase : str = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
UpperCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
style_context_codestyle: 50
label: 1
--- row 9 ---
code:
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPTaTokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie

sys.path.append(str(Path(__file__).parent.parent / 'utils'))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_(self):
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase__ = mock.Mock()
        lowerCamelCase__ = 5_00
        lowerCamelCase__ = {}
        lowerCamelCase__ = HTTPError
        lowerCamelCase__ = {}

        # Download this model to make sure it's in the cache.
        lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""", return_value=_lowerCAmelCase) as mock_head:
            lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def UpperCamelCase_(self):
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase__ = mock.Mock()
        lowerCamelCase__ = 5_00
        lowerCamelCase__ = {}
        lowerCamelCase__ = HTTPError
        lowerCamelCase__ = {}

        # Download this model to make sure it's in the cache.
        lowerCamelCase__ = GPTaTokenizerFast.from_pretrained("""gpt2""")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""", return_value=_lowerCAmelCase) as mock_head:
            lowerCamelCase__ = GPTaTokenizerFast.from_pretrained("""gpt2""")
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase_(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            lowerCamelCase__ = tempfile.mktemp()
            with open(_lowerCAmelCase, """wb""") as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", _lowerCAmelCase)

            lowerCamelCase__ = AlbertTokenizer.from_pretrained(_lowerCAmelCase)
        finally:
            os.remove(_lowerCAmelCase)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("""tokenizer.json"""):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("""tokenizer.json""", """wb""") as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""", _lowerCAmelCase)
            lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 10_00)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("""tokenizer.json""")

    def UpperCamelCase_(self):
        # This test is for deprecated behavior and can be removed in v5
        lowerCamelCase__ = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""")


@is_staging_test
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def UpperCamelCase_(cls):
        lowerCamelCase__ = TOKEN
        HfFolder.save_token(_lowerCAmelCase)

    @classmethod
    def UpperCamelCase_(cls):
        try:
            delete_repo(token=cls._token, repo_id="""test-tokenizer""")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="""valid_org/test-tokenizer-org""")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="""test-dynamic-tokenizer""")
        except HTTPError:
            pass

    def UpperCamelCase_(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase, """vocab.txt""")
            with open(_lowerCAmelCase, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            lowerCamelCase__ = BertTokenizer(_lowerCAmelCase)

        tokenizer.push_to_hub("""test-tokenizer""", use_auth_token=self._token)
        lowerCamelCase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="""test-tokenizer""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_lowerCAmelCase, repo_id="""test-tokenizer""", push_to_hub=_lowerCAmelCase, use_auth_token=self._token)

        lowerCamelCase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def UpperCamelCase_(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase, """vocab.txt""")
            with open(_lowerCAmelCase, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            lowerCamelCase__ = BertTokenizer(_lowerCAmelCase)

        tokenizer.push_to_hub("""valid_org/test-tokenizer-org""", use_auth_token=self._token)
        lowerCamelCase__ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-tokenizer-org""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                _lowerCAmelCase, repo_id="""valid_org/test-tokenizer-org""", push_to_hub=_lowerCAmelCase, use_auth_token=self._token
            )

        lowerCamelCase__ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def UpperCamelCase_(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase, """vocab.txt""")
            with open(_lowerCAmelCase, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))
            lowerCamelCase__ = CustomTokenizer(_lowerCAmelCase)

        # No fast custom tokenizer
        tokenizer.push_to_hub("""test-dynamic-tokenizer""", use_auth_token=self._token)

        lowerCamelCase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_lowerCAmelCase)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizer""")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase, """vocab.txt""")
            with open(_lowerCAmelCase, """w""", encoding="""utf-8""") as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens]))

            lowerCamelCase__ = BertTokenizerFast.from_pretrained(_lowerCAmelCase)
            bert_tokenizer.save_pretrained(_lowerCAmelCase)
            lowerCamelCase__ = CustomTokenizerFast.from_pretrained(_lowerCAmelCase)

        tokenizer.push_to_hub("""test-dynamic-tokenizer""", use_auth_token=self._token)

        lowerCamelCase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_lowerCAmelCase)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizerFast""")

        lowerCamelCase__ = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=_lowerCAmelCase, trust_remote_code=_lowerCAmelCase
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, """CustomTokenizer""")


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""Hello 友達""")
        self.assertEqual(trie.data, {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}})
        trie.add("""Hello""")
        trie.data
        self.assertEqual(trie.data, {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}})

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100"""), ["""[CLS] This is a extra_id_100"""])
        trie.add("""[CLS]""")
        trie.add("""extra_id_1""")
        trie.add("""extra_id_100""")
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100"""), ["""[CLS]""", """ This is a """, """extra_id_100"""])

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""A""")
        self.assertEqual(trie.split("""ABC"""), ["""A""", """BC"""])
        self.assertEqual(trie.split("""BCA"""), ["""BC""", """A"""])

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""TOKEN]""")
        trie.add("""[SPECIAL_TOKEN]""")
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]"""), ["""This is something """, """[SPECIAL_TOKEN]"""])

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""A""")
        trie.add("""P""")
        trie.add("""[SPECIAL_TOKEN]""")
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]"""), ["""This is something """, """[SPECIAL_TOKEN]"""])

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""AB""")
        trie.add("""B""")
        trie.add("""C""")
        self.assertEqual(trie.split("""ABC"""), ["""AB""", """C"""])

    def UpperCamelCase_(self):
        lowerCamelCase__ = Trie()
        trie.add("""ABC""")
        trie.add("""B""")
        trie.add("""CD""")
        self.assertEqual(trie.split("""ABCD"""), ["""ABC""", """D"""])

    def UpperCamelCase_(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        lowerCamelCase__ = Trie()
        lowerCamelCase__ = trie.cut_text("""ABC""", [0, 0, 2, 1, 2, 3])
        self.assertEqual(_lowerCAmelCase, ["""AB""", """C"""])
code_codestyle: 50
style_context:
'''simple docstring'''
import operator


def A__(__lowerCAmelCase: list, __lowerCAmelCase: bool = False, __lowerCAmelCase: list | None = None):
    lowerCamelCase__ = operator.lt if reverse else operator.gt
    lowerCamelCase__ = solution or []

    if not arr:
        return solution

    lowerCamelCase__ = [arr.pop(0)]
    for i, item in enumerate(__lowerCAmelCase):
        if _operator(__lowerCAmelCase, sublist[-1]):
            sublist.append(__lowerCAmelCase)
            arr.pop(__lowerCAmelCase)

    # merging sublist into solution list
    if not solution:
        solution.extend(__lowerCAmelCase)
    else:
        while sublist:
            lowerCamelCase__ = sublist.pop(0)
            for i, xx in enumerate(__lowerCAmelCase):
                if not _operator(__lowerCAmelCase, __lowerCAmelCase):
                    solution.insert(__lowerCAmelCase, __lowerCAmelCase)
                    break
            else:
                solution.append(__lowerCAmelCase)

    strand_sort(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
style_context_codestyle: 50
label: 1
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCamelCase_ ( self ): lowerCamelCase__ = 1 lowerCamelCase__ = 3 lowerCamelCase__ = (32, 32) lowerCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_lowerCAmelCase ) return image @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) return CLIPTextModel(_lowerCAmelCase ) @property def UpperCamelCase_ ( self ): def extract(*_lowerCAmelCase ,**_lowerCAmelCase ): class UpperCamelCase__ : '''simple docstring''' def __init__( self ): lowerCamelCase__ = torch.ones([0] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): self.pixel_values.to(_lowerCAmelCase ) return self return Out() return extract def UpperCamelCase_ ( self ): lowerCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ = self.dummy_cond_unet lowerCamelCase__ = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_lowerCAmelCase ,set_alpha_to_one=_lowerCAmelCase ,) lowerCamelCase__ = self.dummy_vae lowerCamelCase__ = self.dummy_text_encoder lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk lowerCamelCase__ = StableDiffusionPipeline( unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=self.dummy_extractor ,) lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = """A painting of a squirrel eating a burger""" lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 ) lowerCamelCase__ = sd_pipe([prompt] ,generator=_lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 
        )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=_lowerCAmelCase ,)[0]
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ = self.dummy_cond_unet
        lowerCamelCase__ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
        lowerCamelCase__ = self.dummy_vae
        lowerCamelCase__ = self.dummy_text_encoder
        lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        lowerCamelCase__ = StableDiffusionPipeline(
            unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=self.dummy_extractor ,)
        lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ = """A painting of a squirrel eating a burger"""
        lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
        lowerCamelCase__ = sd_pipe([prompt] ,generator=_lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
        lowerCamelCase__ = output.images
        lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=_lowerCAmelCase ,)[0]
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=_lowerCAmelCase )
        assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
        assert isinstance(pipe.scheduler ,_lowerCAmelCase )
        assert pipe.safety_checker is None
        lowerCamelCase__ = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        lowerCamelCase__ = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.dummy_cond_unet
        lowerCamelCase__ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
        lowerCamelCase__ = self.dummy_vae
        lowerCamelCase__ = self.dummy_text_encoder
        lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # put models in fp16
        lowerCamelCase__ = unet.half()
        lowerCamelCase__ = vae.half()
        lowerCamelCase__ = bert.half()
        # make sure here that pndm scheduler skips prk
        lowerCamelCase__ = StableDiffusionPipeline(
            unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=self.dummy_extractor ,)
        lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ = """A painting of a squirrel eating a burger"""
        lowerCamelCase__ = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=_lowerCAmelCase )
        lowerCamelCase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ = (
            """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
            """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
            """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
            """ children from bahnhof zoo, detailed """
        )
        lowerCamelCase__ = 40_03_66_03_46
        lowerCamelCase__ = 7
        # without safety guidance (sld_guidance_scale = 0)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=_lowerCAmelCase )
        lowerCamelCase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ = """padme amidala taking a bath artwork, safe for work, no nudity"""
        lowerCamelCase__ = 27_34_97_17_55
        lowerCamelCase__ = 7
        # without safety guidance (sld_guidance_scale = 0)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
        lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        lowerCamelCase__ = (
            """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
            """ leyendecker"""
        )
        lowerCamelCase__ = 10_44_35_52_34
        lowerCamelCase__ = 12
        # without safety guidance (sld_guidance_scale = 0)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=0 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
        # with safety guidance (strong configuration)
        lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
        lowerCamelCase__ = sd_pipe(
            [prompt] ,generator=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,num_inference_steps=50 ,output_type="""np""" ,width=5_12 ,height=5_12 ,sld_guidance_scale=20_00 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
        lowerCamelCase__ = output.images
        lowerCamelCase__ = image[0, -3:, -3:, -1]
        lowerCamelCase__ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
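A side note on the determinism pattern used throughout the tests above: a `torch.Generator` freshly seeded with the same value reproduces the same noise, which is what makes the dict-output and tuple-output runs comparable slice-for-slice. A minimal standalone sketch (illustrative only, not part of the test file):

import torch

# two independently seeded generators yield identical draws, so two pipeline
# calls seeded this way start from the same initial latents
gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))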
50
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def A__ ( __lowerCAmelCase : dict ):
    return (data["data"], data["target"])


def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
    lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
    # Predict target for test data
    lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
    lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
    return predictions


def A__ ( ):
    lowerCamelCase__ = fetch_california_housing()
    lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
        __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
    lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
    print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
1
'''simple docstring'''
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
    lowerCamelCase__ = []
    for part_id in partition_order:
        lowerCamelCase__ = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(__lowerCAmelCase ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(100 ).repartition(1 )
    lowerCamelCase__ = Spark(__lowerCAmelCase )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes.
    # Setting a max_shard_size of 16 means that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(10 ).repartition(2 )
    lowerCamelCase__ = [1, 0]
    lowerCamelCase__ = _generate_iterable_examples(__lowerCAmelCase , __lowerCAmelCase )
    # Reverse the partitions.
    lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , __lowerCAmelCase )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(10 ).repartition(1 )
    lowerCamelCase__ = SparkExamplesIterable(__lowerCAmelCase )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""" ) as generator_mock:
        lowerCamelCase__ = lambda __lowerCAmelCase : x.reverse()
        lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [2, 1, 0] )
        lowerCamelCase__ = SparkExamplesIterable(__lowerCAmelCase ).shuffle_data_sources(__lowerCAmelCase )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
            lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    lowerCamelCase__ = SparkExamplesIterable(__lowerCAmelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [0, 2] )
    for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
        lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    lowerCamelCase__ = SparkExamplesIterable(__lowerCAmelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , [1, 3] )
    for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
        lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ):
    lowerCamelCase__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
    lowerCamelCase__ = spark.range(100 ).repartition(1 )
    lowerCamelCase__ = Spark(__lowerCAmelCase )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
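For context on the worker-sharding assertions above: handing out N shards to workers with stride `num_workers` produces exactly the (0, 2) / (1, 3) split the test expects. A toy illustration in plain Python (my own variable names, not the datasets API):

partitions = [0, 1, 2, 3]
num_workers = 2
for worker_id in range(num_workers):
    # round-robin striding reproduces the split asserted in the test above
    print(worker_id, partitions[worker_id::num_workers])
# prints: 0 [0, 2]
#         1 [1, 3]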
50
'''simple docstring'''
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
        return scores

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = None
        lowerCamelCase__ = 20
        lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
        # tweak scores to not be uniform anymore
        lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
        lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
        lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = None
        lowerCamelCase__ = 10
        lowerCamelCase__ = 2
        # create ramp distribution
        lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
        lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        lowerCamelCase__ = 5
        lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
        lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
        lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = None
        lowerCamelCase__ = 10
        lowerCamelCase__ = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        lowerCamelCase__ = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
        lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 20
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
        # check that min length is applied at length 5
        lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
        lowerCamelCase__ = 5
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
        # check that min length is not applied anymore at length 15
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = 15
        lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 20
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
        # check that all scores are -inf except the bos_token_id score
        lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
        lowerCamelCase__ = 1
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        lowerCamelCase__ = 3
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 20
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = 5
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
        lowerCamelCase__ = 4
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        lowerCamelCase__ = 3
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 4
        lowerCamelCase__ = 10
        lowerCamelCase__ = 15
        lowerCamelCase__ = 2
        lowerCamelCase__ = 1
        lowerCamelCase__ = 15
        # dummy input_ids and scores
        lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
        lowerCamelCase__ = input_ids.copy()
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = scores.copy()
        # instantiate all dist processors
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = 10
        # no processor list
        lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        # with processor list
        lowerCamelCase__ = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
        # scores should be equal
        self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 4
        lowerCamelCase__ = 10
        lowerCamelCase__ = 15
        lowerCamelCase__ = 2
        lowerCamelCase__ = 1
        lowerCamelCase__ = 15
        # dummy input_ids and scores
        lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
        lowerCamelCase__ = input_ids.copy()
        lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = scores.copy()
        # instantiate all dist processors
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
        lowerCamelCase__ = 10

        # no processor list
        def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
            lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            return scores

        # with processor list
        def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
            lowerCamelCase__ = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            return scores

        lowerCamelCase__ = jax.jit(_lowerCAmelCase )
        lowerCamelCase__ = jax.jit(_lowerCAmelCase )
        lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        # scores should be equal
        self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
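If it helps to see the warper semantics outside the test, here is a rough numpy sketch of top-p (nucleus) filtering under the same convention the tests assert: keep the smallest set of highest-probability tokens whose mass reaches p, zero out the rest. This is an illustration of the technique, not the FlaxTopPLogitsWarper implementation:

import numpy as np

def top_p_filter(probs: np.ndarray, top_p: float) -> np.ndarray:
    # sort descending; a token is kept while the mass strictly before it is < top_p,
    # so the token that crosses the threshold is included
    order = np.argsort(probs)[::-1]
    cum = np.cumsum(probs[order])
    keep = cum - probs[order] < top_p
    out = np.zeros_like(probs)
    out[order[keep]] = probs[order[keep]]
    return out

print(top_p_filter(np.array([0.3, 0.1, 0.1, 0.5]), 0.8))  # keeps 0.5 and 0.3, as the test expects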
50
1
'''simple docstring'''
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
    if isinstance(__lowerCAmelCase , torch.Tensor ):
        return image
    elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
        lowerCamelCase__ = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        lowerCamelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
        lowerCamelCase__ = np.concatenate(__lowerCAmelCase , axis=0 )
        lowerCamelCase__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
        lowerCamelCase__ = image.transpose(0 , 3 , 1 , 2 )
        lowerCamelCase__ = 2.0 * image - 1.0
        lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase )
    elif isinstance(image[0] , torch.Tensor ):
        lowerCamelCase__ = torch.cat(__lowerCAmelCase , dim=0 )
    return image


def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any=0.9995 ):
    if not isinstance(__lowerCAmelCase , np.ndarray ):
        lowerCamelCase__ = True
        lowerCamelCase__ = va.device
        lowerCamelCase__ = va.cpu().numpy()
        lowerCamelCase__ = va.cpu().numpy()
    lowerCamelCase__ = np.sum(va * va / (np.linalg.norm(__lowerCAmelCase ) * np.linalg.norm(__lowerCAmelCase )) )
    if np.abs(__lowerCAmelCase ) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        lowerCamelCase__ = (1 - t) * va + t * va
    else:
        lowerCamelCase__ = np.arccos(__lowerCAmelCase )
        lowerCamelCase__ = np.sin(__lowerCAmelCase )
        lowerCamelCase__ = theta_a * t
        lowerCamelCase__ = np.sin(__lowerCAmelCase )
        lowerCamelCase__ = np.sin(theta_a - theta_t ) / sin_theta_a
        lowerCamelCase__ = sin_theta_t / sin_theta_a
        lowerCamelCase__ = sa * va + sa * va
    if inputs_are_torch:
        lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
    return va


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ):
    lowerCamelCase__ = F.normalize(__lowerCAmelCase , dim=-1 )
    lowerCamelCase__ = F.normalize(__lowerCAmelCase , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
    for param in model.parameters():
        lowerCamelCase__ = value


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,):
        super().__init__()
        self.register_modules(
            vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,clip_model=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,coca_model=_lowerCAmelCase ,coca_tokenizer=_lowerCAmelCase ,coca_transform=_lowerCAmelCase ,)
        lowerCamelCase__ = (
            feature_extractor.size
            if isinstance(feature_extractor.size ,_lowerCAmelCase )
            else feature_extractor.size["""shortest_edge"""]
        )
        lowerCamelCase__ = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder ,_lowerCAmelCase )
        set_requires_grad(self.clip_model ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        self.enable_attention_slicing(_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        set_requires_grad(self.vae ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        set_requires_grad(self.vae ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        set_requires_grad(self.unet ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        set_requires_grad(self.unet ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        # get the original timestep using init_timestep
        lowerCamelCase__ = min(int(num_inference_steps * strength ) ,_lowerCAmelCase )
        lowerCamelCase__ = max(num_inference_steps - init_timestep ,0 )
        lowerCamelCase__ = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ):
        if not isinstance(_lowerCAmelCase ,torch.Tensor ):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
        lowerCamelCase__ = image.to(device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
        if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
            lowerCamelCase__ = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
            ]
            lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
        else:
            lowerCamelCase__ = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        lowerCamelCase__ = 0.1_8215 * init_latents
        lowerCamelCase__ = init_latents.repeat_interleave(_lowerCAmelCase ,dim=0 )
        lowerCamelCase__ = randn_tensor(init_latents.shape ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
        # get latents
        lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = init_latents
        return latents

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            lowerCamelCase__ = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
        lowerCamelCase__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = self.feature_extractor.preprocess(_lowerCAmelCase )
        lowerCamelCase__ = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
        lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip.repeat_interleave(_lowerCAmelCase ,dim=0 )
        return image_embeddings_clip

    @torch.enable_grad()
    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = latents.detach().requires_grad_()
        lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
        # predict the noise residual
        lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
        if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            lowerCamelCase__ = self.scheduler.alphas_cumprod[timestep]
            lowerCamelCase__ = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            lowerCamelCase__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            lowerCamelCase__ = torch.sqrt(_lowerCAmelCase )
            lowerCamelCase__ = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler ,_lowerCAmelCase ):
            lowerCamelCase__ = self.scheduler.sigmas[index]
            lowerCamelCase__ = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        lowerCamelCase__ = 1 / 0.1_8215 * sample
        lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
        lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
        lowerCamelCase__ = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
        lowerCamelCase__ = self.normalize(_lowerCAmelCase ).to(latents.dtype )
        lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
        lowerCamelCase__ = spherical_dist_loss(_lowerCAmelCase ,_lowerCAmelCase ).mean() * clip_guidance_scale
        lowerCamelCase__ = -torch.autograd.grad(_lowerCAmelCase ,_lowerCAmelCase )[0]
        if isinstance(self.scheduler ,_lowerCAmelCase ):
            lowerCamelCase__ = latents.detach() + grads * (sigma**2)
            lowerCamelCase__ = noise_pred_original
        else:
            lowerCamelCase__ = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 0.6 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = 7.5 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,_lowerCAmelCase = 0.8 ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 0.1 ,):
        if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if isinstance(_lowerCAmelCase ,torch.Generator ) and batch_size > 1:
            lowerCamelCase__ = [generator] + [None] * (batch_size - 1)
        lowerCamelCase__ = [
            ("""model""", self.coca_model is None),
            ("""tokenizer""", self.coca_tokenizer is None),
            ("""transform""", self.coca_transform is None),
        ]
        lowerCamelCase__ = [x[0] for x in coca_is_none if x[1]]
        lowerCamelCase__ = """, """.join(_lowerCAmelCase )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(_lowerCAmelCase ):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
        if style_prompt is None:
            if len(_lowerCAmelCase ):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
        # get prompt text embeddings for content and style
        lowerCamelCase__ = self.tokenizer(
            _lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
        lowerCamelCase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        lowerCamelCase__ = self.tokenizer(
            _lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
        lowerCamelCase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        # duplicate text embeddings for each generation per prompt
        lowerCamelCase__ = text_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
        # set timesteps
        lowerCamelCase__ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        lowerCamelCase__ = {}
        if accepts_offset:
            lowerCamelCase__ = 1
        self.scheduler.set_timesteps(_lowerCAmelCase ,**_lowerCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        lowerCamelCase__ , lowerCamelCase__ = self.get_timesteps(_lowerCAmelCase ,_lowerCAmelCase ,self.device )
        lowerCamelCase__ = timesteps[:1].repeat(_lowerCAmelCase )
        # Preprocess image
        lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_latents(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
        lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_latents(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
        lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        if clip_guidance_scale > 0:
            lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = slerp(
                _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowerCamelCase__ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowerCamelCase__ = content_text_input.input_ids.shape[-1]
            lowerCamelCase__ = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=_lowerCAmelCase ,return_tensors="""pt""" )
            lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            lowerCamelCase__ = uncond_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowerCamelCase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        lowerCamelCase__ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
                    self.device )
            else:
                lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            lowerCamelCase__ = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCamelCase__ = {}
        if accepts_eta:
            lowerCamelCase__ = eta
        # check if the scheduler accepts generator
        lowerCamelCase__ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            lowerCamelCase__ = generator
        with self.progress_bar(total=_lowerCAmelCase ):
            for i, t in enumerate(_lowerCAmelCase ):
                # expand the latents if we are doing classifier free guidance
                lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
                # predict the noise residual
                lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
                    lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    lowerCamelCase__ = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    lowerCamelCase__ , lowerCamelCase__ = self.cond_fn(
                        _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,)
                # compute the previous noisy sample x_t -> x_t-1
                lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        lowerCamelCase__ = 1 / 0.1_8215 * latents
        lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
        lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
        lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
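The slerp helper in the pipeline above is the standard spherical interpolation: slerp(t, v0, v1) = sin((1-t)θ)/sin(θ) · v0 + sin(tθ)/sin(θ) · v1, with θ the angle between v0 and v1, falling back to plain lerp when the vectors are nearly parallel. A toy numpy check of that math (my own function name; not the pipeline's helper):

import numpy as np

def slerp_demo(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:
        # nearly parallel vectors: plain linear interpolation
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp_demo(0.5, v0, v1)
print(mid, np.linalg.norm(mid))  # midpoint stays on the unit circle, unlike plain lerp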
50
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


UpperCamelCase : Any = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


UpperCamelCase : Dict = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = ['PerceiverFeatureExtractor']
    UpperCamelCase : Union[str, Any] = ['PerceiverImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Optional[int] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring'''
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
    return numa ^ numa < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
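The one-liner above relies on the fact that the XOR of two integers is negative exactly when their sign bits differ. A readable restatement with explicit names (the names are mine; the original identifiers were mangled):

def different_signs(num_a: int, num_b: int) -> bool:
    # the XOR is negative iff exactly one operand has its sign bit set
    return (num_a ^ num_b) < 0

print(different_signs(3, -7))   # True: sign bits differ
print(different_signs(-3, -7))  # False: both negative
print(different_signs(3, 7))    # False: both positive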
50
1
'''simple docstring'''
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ):
    lowerCamelCase__ = [0 for i in range(r + 1 )]
    # nc0 = 1
    lowerCamelCase__ = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        lowerCamelCase__ = min(__lowerCAmelCase , __lowerCAmelCase )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
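The routine above is the classic single-row Pascal's triangle update: O(n·r) time, O(r) space, and updating j from high to low is what lets the row be reused in place. A quick cross-check against the standard library (my own sketch, not the file above):

import math

def binomial(n: int, r: int) -> int:
    row = [1] + [0] * r  # row[j] holds C(i, j) after i outer iterations
    for i in range(1, n + 1):
        # walk high to low so row[j - 1] is still C(i - 1, j - 1)
        for j in range(min(i, r), 0, -1):
            row[j] += row[j - 1]
    return row[r]

assert binomial(10, 5) == math.comb(10, 5) == 252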
50
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


UpperCamelCase : Union[str, Any] = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Any = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


UpperCamelCase : Any = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : List[str] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment
import os
import sys

import transformers


UpperCamelCase : int = '3'

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
50
1
'''simple docstring'''
import argparse
from collections import defaultdict

import yaml


UpperCamelCase : Union[str, Any] = 'docs/source/en/_toctree.yml'


def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = defaultdict(__lowerCAmelCase )
    lowerCamelCase__ = []
    lowerCamelCase__ = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
        else:
            new_doc_list.append(__lowerCAmelCase )
    lowerCamelCase__ = new_doc_list
    lowerCamelCase__ = [key for key, value in counts.items() if value > 1]
    lowerCamelCase__ = []
    for duplicate_key in duplicates:
        lowerCamelCase__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
        if len(__lowerCAmelCase ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
                """others.""" )
        # Only add this once
        new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    # Sort
    lowerCamelCase__ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(__lowerCAmelCase ) > 1:
        raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
    overview_doc.extend(__lowerCAmelCase )
    return overview_doc


def A__ ( __lowerCAmelCase : Optional[Any]=False ):
    with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
        lowerCamelCase__ = yaml.safe_load(f.read() )
    # Get to the API doc
    lowerCamelCase__ = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowerCamelCase__ = content[api_idx]["""sections"""]
    # Then to the scheduler doc
    lowerCamelCase__ = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    lowerCamelCase__ = api_doc[scheduler_idx]["""sections"""]
    lowerCamelCase__ = clean_doc_toc(__lowerCAmelCase )
    lowerCamelCase__ = False
    if new_scheduler_doc != scheduler_doc:
        lowerCamelCase__ = True
        if overwrite:
            lowerCamelCase__ = new_scheduler_doc
    if diff:
        if overwrite:
            lowerCamelCase__ = api_doc
            with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
        else:
            raise ValueError(
                """The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.""" )


def A__ ( __lowerCAmelCase : List[str]=False ):
    with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
        lowerCamelCase__ = yaml.safe_load(f.read() )
    # Get to the API doc
    lowerCamelCase__ = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    lowerCamelCase__ = content[api_idx]["""sections"""]
    # Then to the pipeline doc
    lowerCamelCase__ = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    lowerCamelCase__ = False
    lowerCamelCase__ = api_doc[pipeline_idx]["""sections"""]
    lowerCamelCase__ = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            lowerCamelCase__ = pipeline_doc["""section"""]
            lowerCamelCase__ = clean_doc_toc(__lowerCAmelCase )
            if overwrite:
                lowerCamelCase__ = new_sub_pipeline_doc
        new_pipeline_docs.append(__lowerCAmelCase )
    # sort overall pipeline doc
    lowerCamelCase__ = clean_doc_toc(__lowerCAmelCase )
    if new_pipeline_docs != pipeline_docs:
        lowerCamelCase__ = True
        if overwrite:
            lowerCamelCase__ = new_pipeline_docs
    if diff:
        if overwrite:
            lowerCamelCase__ = api_doc
            with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
        else:
            raise ValueError(
                """The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.""" )


if __name__ == "__main__":
    UpperCamelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    UpperCamelCase : Any = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
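As a sanity check of the cleaning rules encoded above (overview pinned first, duplicate keys collapsed, the rest sorted case-insensitively by title), here is a toy run of my own simplified reimplementation, not the script itself:

docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "zeta", "title": "Zeta"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "alpha", "title": "Alpha"},  # duplicate key collapses to one entry
]
seen, cleaned = set(), []
for d in docs:
    if d["title"].lower() != "overview" and d["local"] not in seen:
        seen.add(d["local"])
        cleaned.append(d)
cleaned.sort(key=lambda d: d["title"].lower())
result = [d for d in docs if d["title"].lower() == "overview"] + cleaned
print([d["local"] for d in result])  # ['overview', 'alpha', 'zeta']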
50
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : Union[str, Any] = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'gpt_bigcode'
    _UpperCamelCase = ['past_key_values']
    _UpperCamelCase = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = n_positions
        lowerCamelCase__ = n_embd
        lowerCamelCase__ = n_layer
        lowerCamelCase__ = n_head
        lowerCamelCase__ = n_inner
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = resid_pdrop
        lowerCamelCase__ = embd_pdrop
        lowerCamelCase__ = attn_pdrop
        lowerCamelCase__ = layer_norm_epsilon
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = scale_attn_weights
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = attention_softmax_in_fpaa
        lowerCamelCase__ = scale_attention_softmax_in_fpaa
        lowerCamelCase__ = multi_query
        lowerCamelCase__ = bos_token_id
        lowerCamelCase__ = eos_token_id
        super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def A__ ( __lowerCAmelCase : dict ):
    return (data["data"], data["target"])


def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
    lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
    # Predict target for test data
    lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
    lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
    return predictions


def A__ ( ):
    lowerCamelCase__ = fetch_california_housing()
    lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
        __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
    lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
    print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
'''simple docstring'''
from PIL import Image


def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ):
    def brightness(__lowerCAmelCase : int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(__lowerCAmelCase )


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00)
        brigt_img.save('image_data/lena_brightness.png', format='png')
50
1
'''Google BLEU (aka GLEU) metric, a thin wrapper around NLTK's `gleu_score`.'''
from typing import Dict, List

import datasets
from datasets import MetricInfo
from nltk.translate import gleu_score


_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'

_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'

_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
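# Minimal sanity check, not part of the original metric script. It assumes only that
# `nltk` is installed and calls the same function `_compute` wraps above.
if __name__ == "__main__":
    hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
    list_of_references = [[["the", "cat", "sat", "on", "the", "mat"]]]
    # GLEU is min(n-gram precision, n-gram recall); an exact match scores 1.0.
    print(gleu_score.corpus_gleu(list_of_references=list_of_references, hypotheses=hypotheses))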
'''Count negative numbers in a matrix whose rows and columns are sorted in decreasing order.'''


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """
    Find the index of the first negative number in a decreasing sorted array.

    >>> find_negative_index([4, 3, 2, -1])
    3
    >>> find_negative_index([1, 0, -1, -4])
    2
    >>> find_negative_index([])
    0
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
    8
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # The search window shrinks monotonically because columns are also sorted.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
'''Open the top Google search results for a query given on the command line.'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
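# Note (an observation, not from the original script): the ".eZt8xd" CSS selector is tied
# to Google's current result-page markup and is likely to break whenever that markup
# changes; there is no stable public API behind it.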
'''Utility to bump version numbers across the repository for releases and dev cycles.'''
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
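# Example invocations (hedged: they assume this file is saved as `release.py`, mirroring
# the argparse flags defined above):
#   python release.py                 # pre-release work, bump to the next minor version
#   python release.py --patch         # pre-release work for a patch (micro) release
#   python release.py --post_release  # post-release work, move back to a .dev0 version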
'''Project Euler Problem 234 (semidivisible numbers): sum the numbers below a limit that
are divisible by exactly one of lps(n) and ups(n), the primes bracketing sqrt(n).'''
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n, using a simple odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
'''Fast tokenizer class for SqueezeBERT, backed by the HuggingFace `tokenizers` library.'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
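# Hedged usage sketch, comment-only because the relative imports above mean this module is
# consumed through the installed package rather than run directly:
#   from transformers import SqueezeBertTokenizerFast
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("first segment", "second segment")
#   # enc["token_type_ids"] is 0 for segment A (including [CLS] and the first [SEP]) and 1
#   # for segment B, exactly as create_token_type_ids_from_sequences computes above.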
'''Memoized recursive factorial.'''
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(0)
    1
    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''Prepare whole-word-masking reference files for Chinese text, using LTP word segmentation.'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
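# Example invocation (hedged: assumes the script is saved as `run_chinese_ref.py`; the
# paths are just the argparse defaults above, and the LTP model directory must exist):
#   python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt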
'''Unit tests for the generation stopping criteria.'''
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
'''Convert original DPT-hybrid checkpoints (MiDaS depth / ADE20k segmentation) to the HuggingFace format.'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    # Config attribute names below are restored to match transformers' DPT conversion
    # script; the obfuscated source only preserved the assigned values.
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # NOTE: kept from the original script: `"nyu" or "midas" in checkpoint_url` is always
    # truthy, so this hybrid block runs for every checkpoint (the "ade" block below then
    # overrides what it needs).
    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict; the target
        # key names are restored from the standard ViT conversion pattern.
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
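# Example invocation (hedged). Despite the flag name, --checkpoint_url is passed straight
# to torch.load() above, so point it at a locally downloaded checkpoint file (the
# commented-out load_state_dict_from_url line is the remnant of the URL path):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas-501f0c75.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas --show_prediction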
'''Sandboxed execution of generated code for pass/fail evaluation.'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a subprocess with a timeout and report pass/fail."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    # Attribute names below are restored from the OpenAI human-eval reference
    # implementation; the obfuscated source only preserved the `= None` assignments.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
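# Hedged usage sketch (not in the original module). It exercises the sandboxed runner
# end-to-end; POSIX-only, since `time_limit` relies on SIGALRM, and it must live under a
# `__main__` guard because `check_correctness` spawns a subprocess.
if __name__ == "__main__":
    ok = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
    bad = check_correctness("assert 1 + 1 == 3", timeout=3.0, task_id="demo/1", completion_id=0)
    print(ok["passed"], bad["passed"])  # expected: True False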
'''DDIM image-to-image pipeline that noises an input image to a chosen strength and denoises it.'''
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


# Class name restored from the diffusers community example this file matches.
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if strength is not valid
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
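# Hedged usage sketch, not part of the original file. It assumes that
# "google/ddpm-celebahq-256" (an unconditional 256x256 DDPM checkpoint) supplies a
# compatible `unet`/`scheduler` pair; any checkpoint matching the 256x256 preprocessing
# above would work the same way.
if __name__ == "__main__":
    from diffusers import DDPMPipeline

    base = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=base.unet, scheduler=base.scheduler)
    init_image = PIL.Image.new("RGB", (256, 256), color=(128, 128, 128))  # stand-in input image
    images, noising_step = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)
    print("image was noised to timestep", noising_step)
    images[0].save("ddim_noise_comparative_analysis.png")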
'''CodeGen model configuration and ONNX export configuration.'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
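# Hedged usage sketch, comment-only because of the package-relative imports above:
#   from transformers import CodeGenConfig
#   from transformers.models.codegen.configuration_codegen import CodeGenOnnxConfig
#   onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
#   print(dict(onnx_config.inputs))        # input_ids, past key/values, attention_mask
#   print(onnx_config.default_onnx_opset)  # 13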
'''MGP-STR (scene text recognition) model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
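# Hedged usage sketch (assumes only the standard PretrainedConfig API):
#   config = MgpstrConfig(max_token_length=32)      # override any default above
#   config.save_pretrained("./mgp-str-config")      # writes config.json
#   config = MgpstrConfig.from_pretrained("./mgp-str-config")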
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def A__ ( __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : int=10 , __lowerCAmelCase : Optional[Any]=100 , __lowerCAmelCase : Union[str, Any]=1026 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]="data/tokenized_stories_train_wikitext103.jbl" , __lowerCAmelCase : Optional[int]="igf_context_pairs.jbl" , ): set_seed(3 ) # generate train_data and objective_set lowerCamelCase__ , lowerCamelCase__ = generate_datasets( __lowerCAmelCase , __lowerCAmelCase , number=__lowerCAmelCase , min_len=1026 , trim=__lowerCAmelCase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? lowerCamelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model lowerCamelCase__ = load_gpta("""gpt2""" ).to(__lowerCAmelCase ) print("""computing perplexity on objective set""" ) lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).item() print("""perplexity on objective set:""" , __lowerCAmelCase ) # collect igf pairs and save to file demo.jbl collect_objective_set(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Any=15 , __lowerCAmelCase : List[str]=128 , __lowerCAmelCase : Any=100 , __lowerCAmelCase : Optional[int]="igf_model.pt" , ): set_seed(42 ) # Load pre-trained model lowerCamelCase__ = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model lowerCamelCase__ = SecondaryLearner(__lowerCAmelCase ) # Train secondary learner lowerCamelCase__ = train_secondary_learner( __lowerCAmelCase , __lowerCAmelCase , max_epochs=__lowerCAmelCase , batch_size=__lowerCAmelCase , eval_freq=100 , igf_model_path=__lowerCAmelCase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str=32 , __lowerCAmelCase : Tuple=1000 , __lowerCAmelCase : str=16 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : List[str]=recopy_gpta , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : str=10 , __lowerCAmelCase : int="gpt2_finetuned.pt" , ): lowerCamelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) lowerCamelCase__ = RandomSampler(__lowerCAmelCase ) lowerCamelCase__ = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase ) lowerCamelCase__ = max_steps // (len(__lowerCAmelCase )) + 1 lowerCamelCase__ = 0 lowerCamelCase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = recopy_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) model.train() if secondary_learner is not None: secondary_learner.to(__lowerCAmelCase ) secondary_learner.eval() 
lowerCamelCase__ = [] lowerCamelCase__ = 0 lowerCamelCase__ = [] lowerCamelCase__ = [] # Compute the performance of the transformer model at the beginning lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) test_perps.append(__lowerCAmelCase ) print("""Test perplexity, step""" , __lowerCAmelCase , """:""" , __lowerCAmelCase ) for epoch in range(int(__lowerCAmelCase ) ): for step, example in enumerate(__lowerCAmelCase ): torch.cuda.empty_cache() lowerCamelCase__ = random.randint(0 , example.size(2 ) - context_len - 1 ) lowerCamelCase__ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() lowerCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase ) lowerCamelCase__ = True if secondary_learner is not None: lowerCamelCase__ = secondary_learner.forward( torch.tensor(__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__lowerCAmelCase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: lowerCamelCase__ = -1 if predicted_q < threshold: lowerCamelCase__ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) lowerCamelCase__ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() lowerCamelCase__ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) test_perps.append(__lowerCAmelCase ) print("""Test perplexity, step""" , __lowerCAmelCase , """:""" , __lowerCAmelCase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __lowerCAmelCase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def A__ ( ): lowerCamelCase__ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__lowerCAmelCase , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__lowerCAmelCase , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1000 , type=__lowerCAmelCase , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__lowerCAmelCase , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__lowerCAmelCase , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__lowerCAmelCase , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__lowerCAmelCase , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1026 , type=__lowerCAmelCase , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__lowerCAmelCase , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__lowerCAmelCase , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__lowerCAmelCase , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Reset the model to the original 
pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowerCAmelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner lowerCamelCase__ = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner lowerCamelCase__ = training_secondary_learner( __lowerCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model lowerCamelCase__ = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model lowerCamelCase__ , lowerCamelCase__ = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=__lowerCAmelCase ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowerCAmelCase , secondary_learner=__lowerCAmelCase , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
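# --- Example invocation (illustration only; the script filename `run_igf.py`
# and the concrete paths are assumptions, not taken from the code above):
#
#   python run_igf.py \
#       --data_dir ./data --model_name_or_path gpt2 --output_dir ./igf_out \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl \
#       --context_len 32 --max_steps 1000 --batch_size 16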
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    # Cast every tensor in a saved state dict to float16.
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
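# --- Example invocation (illustration only; the script filename is an
# assumption). `fire.Fire(convert)` maps CLI arguments onto the function above:
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path overwrites the input file in place.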
'''simple docstring'''
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    # Yields Fibonacci numbers starting from 1: 1, 2, 3, 5, 8, ...
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # Index of the first Fibonacci term whose decimal representation has n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
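# --- Quick check (illustration): 144 is the 12th Fibonacci term and the first
# with three digits, so `solution(3)` returns 12; Project Euler 25 asks for the
# first term with 1000 digits, i.e. `solution()`.
#
#   >>> solution(3)
#   12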
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
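# --- Usage sketch (illustration; requires a CUDA toolchain at runtime). The
# first call JIT-compiles the extension, later calls reuse the cached build.
# The exported function names below are assumptions based on the upstream
# deformable-DETR kernels, not taken from the code above:
#
#   MSDA = load_cuda_kernels()
#   # e.g. MSDA.ms_deform_attn_forward / MSDA.ms_deform_attn_backward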
'''simple docstring''' import re import string import numpy as np import datasets UpperCamelCase : str = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' UpperCamelCase : Optional[Any] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' UpperCamelCase : Union[str, Any] = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Value("""string""" 
,id="""sequence""" ), } ) ,reference_urls=[] ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,): if regexes_to_ignore is not None: for s in regexes_to_ignore: lowerCamelCase__ = np.array([re.sub(_lowerCAmelCase ,"""""" ,_lowerCAmelCase ) for x in predictions] ) lowerCamelCase__ = np.array([re.sub(_lowerCAmelCase ,"""""" ,_lowerCAmelCase ) for x in references] ) else: lowerCamelCase__ = np.asarray(_lowerCAmelCase ) lowerCamelCase__ = np.asarray(_lowerCAmelCase ) if ignore_case: lowerCamelCase__ = np.char.lower(_lowerCAmelCase ) lowerCamelCase__ = np.char.lower(_lowerCAmelCase ) if ignore_punctuation: lowerCamelCase__ = string.punctuation.maketrans("""""" ,"""""" ,string.punctuation ) lowerCamelCase__ = np.char.translate(_lowerCAmelCase ,table=_lowerCAmelCase ) lowerCamelCase__ = np.char.translate(_lowerCAmelCase ,table=_lowerCAmelCase ) if ignore_numbers: lowerCamelCase__ = string.digits.maketrans("""""" ,"""""" ,string.digits ) lowerCamelCase__ = np.char.translate(_lowerCAmelCase ,table=_lowerCAmelCase ) lowerCamelCase__ = np.char.translate(_lowerCAmelCase ,table=_lowerCAmelCase ) lowerCamelCase__ = predictions == references return {"exact_match": np.mean(_lowerCAmelCase ) * 1_00}
'''simple docstring'''


def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
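# --- Expected output for the sample above (illustration): the greedy rule
# keeps every activity whose start time is at or after the finish time of the
# last selected activity, printing indices:
#
#   The following activities are selected:
#   0,1,3,4,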
'''simple docstring'''
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    # Trial division: collect each prime factor with multiplicity.
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
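# --- Quick check (illustration):
#
#   >>> prime_factors(360)
#   [2, 2, 2, 3, 3, 5]
#   >>> prime_factors(97)
#   [97]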
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
'''simple docstring'''
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    # Largest product of thirteen adjacent digits in the number above.
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
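# --- Note (illustration): for the published Project Euler problem 8, the
# greatest product of thirteen adjacent digits in this number is 23514624000,
# so `solution()` is expected to print that value.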
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['torch', 'transformers', 'onnx'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
'''simple docstring'''
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # Pull an increasing (or decreasing) "strand" out of the remaining input
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
'''simple docstring'''


def binary_recursive(decimal: int) -> str:
    # Recursively build the binary digits of a non-negative integer.
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
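# --- Quick check (illustration):
#
#   >>> main("8")
#   '0b1000'
#   >>> main("-27")
#   '-0b11011'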
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the fetched bunch into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset and evaluate an XGBoost regressor
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
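# --- Illustration (not part of the module above; assumes an installed
# `transformers` release): with `_LazyModule`, importing the package is cheap,
# and the tokenizer submodule is only loaded when the name is first touched.
from transformers.models.herbert import HerbertTokenizer  # real import happens here, lazily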
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = 10 def UpperCamelCase_ ( self ): lowerCamelCase__ = [1, 2, 3, 4] lowerCamelCase__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_lowerCAmelCase ,self.block_size ,0 ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_lowerCAmelCase ,self.block_size ,0 ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_lowerCAmelCase ,self.block_size ,0 ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" lowerCamelCase__ , lowerCamelCase__ = process_story(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase ,[] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """""" lowerCamelCase__ , lowerCamelCase__ = process_story(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase ,[] ) self.assertEqual(_lowerCAmelCase ,[] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) lowerCamelCase__ , lowerCamelCase__ = process_story(_lowerCAmelCase ) lowerCamelCase__ = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = ["""It was the best of times."""] self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = torch.tensor([1, 2, 3, 4] ) lowerCamelCase__ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_lowerCAmelCase ,0 ).numpy() ,expected.numpy() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_lowerCAmelCase ,23 ).numpy() ,expected.numpy() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_lowerCAmelCase ,1 ).numpy() ,expected.numpy() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 1_01 lowerCamelCase__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] ) lowerCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowerCamelCase__ = compute_token_type_ids(_lowerCAmelCase ,_lowerCAmelCase ) np.testing.assert_array_equal(_lowerCAmelCase ,_lowerCAmelCase )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : str = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] UpperCamelCase : Tuple = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Any ): lowerCamelCase__ = { """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks lowerCamelCase__ = int(re.match(R""".*layer_(\d*).*""" , __lowerCAmelCase )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def A__ ( __lowerCAmelCase : Tuple ): if dtype == torch.bool: return 1 / 8 lowerCamelCase__ = re.search(R"""[^\d](\d+)$""" , str(__lowerCAmelCase ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) lowerCamelCase__ = int(bit_search.groups()[0] ) return bit_size // 8 def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ): # Construct model if bloom_config_file == "": lowerCamelCase__ = BloomConfig() else: lowerCamelCase__ = BloomConfig.from_json_file(__lowerCAmelCase ) if shard_model: lowerCamelCase__ = os.listdir(__lowerCAmelCase ) lowerCamelCase__ = sorted(filter(lambda __lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCAmelCase ) ) lowerCamelCase__ = {"""weight_map""": {}, """metadata""": {}} lowerCamelCase__ = 0 lowerCamelCase__ = None lowerCamelCase__ = BloomConfig() for j, file in enumerate(__lowerCAmelCase ): print("""Processing file: {}""".format(__lowerCAmelCase ) ) lowerCamelCase__ = None for i in range(__lowerCAmelCase ): # load all TP files lowerCamelCase__ = file.replace("""model_00""" , F'''model_0{i}''' ) lowerCamelCase__ = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , map_location="""cpu""" ) # Rename keys in the transformers names lowerCamelCase__ = list(temp.keys() ) for key in keys: lowerCamelCase__ = temp.pop(__lowerCAmelCase ) if tensors is None: lowerCamelCase__ = temp else: for key in tensors.keys(): if any(key.endswith(__lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel lowerCamelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks lowerCamelCase__ = torch.cat([tensors[key], temp[key]] , dim=__lowerCAmelCase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(__lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): 
lowerCamelCase__ = tensors[key] / pretraining_tp torch.save( __lowerCAmelCase , os.path.join( __lowerCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__lowerCAmelCase ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): lowerCamelCase__ = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: lowerCamelCase__ = """pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ) , str(len(__lowerCAmelCase ) ).zfill(5 ) ) lowerCamelCase__ = BloomConfig() lowerCamelCase__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME lowerCamelCase__ = total_size with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) with open(os.path.join(__lowerCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + """\n""" f.write(__lowerCAmelCase ) else: lowerCamelCase__ = BloomModel(__lowerCAmelCase ) lowerCamelCase__ = os.listdir(__lowerCAmelCase ) lowerCamelCase__ = sorted(filter(lambda __lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCAmelCase ) ) lowerCamelCase__ = None for i, file in enumerate(__lowerCAmelCase ): lowerCamelCase__ = None for i in range(__lowerCAmelCase ): # load all TP files lowerCamelCase__ = file.replace("""model_00""" , F'''model_0{i}''' ) lowerCamelCase__ = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , map_location="""cpu""" ) # Rename keys in the transformers names lowerCamelCase__ = list(temp.keys() ) for key in keys: lowerCamelCase__ = temp.pop(__lowerCAmelCase ) if tensors is None: lowerCamelCase__ = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(__lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel lowerCamelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks lowerCamelCase__ = torch.cat([tensors[key], temp[key]] , dim=__lowerCAmelCase ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(__lowerCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): lowerCamelCase__ = tensors[key] / pretraining_tp lowerCamelCase__ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: lowerCamelCase__ = set(other_keys.missing_keys ) else: lowerCamelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) lowerCamelCase__ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME lowerCamelCase__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: lowerCamelCase__ = model.to(config.torch_dtype ) torch.save(model.state_dict() , __lowerCAmelCase ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(__lowerCAmelCase , 
"""w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) UpperCamelCase : Optional[Any] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
50
'''simple docstring''' def A__ ( numa : int , numb : int ): return numa ^ numb < 0 if __name__ == "__main__": import doctest doctest.testmod()
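The sign test works because integer XOR yields a set sign bit only when exactly one operand is negative. A usage sketch with illustrative names (the snippet itself leaves the function anonymous as A__):

def different_signs(num1: int, num2: int) -> bool:
    # The sign bit of num1 ^ num2 is set iff the operands' sign bits differ.
    return num1 ^ num2 < 0

assert different_signs(3, -7) and different_signs(-3, 7)
assert not different_signs(3, 7) and not different_signs(-3, -7)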
50
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings UpperCamelCase : List[str] = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n' @add_start_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'rag' _UpperCamelCase = True def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=" / " ,_lowerCAmelCase=" // " ,_lowerCAmelCase=5 ,_lowerCAmelCase=3_00 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=8 ,_lowerCAmelCase="wiki_dpr" ,_lowerCAmelCase="train" ,_lowerCAmelCase="compressed" ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( bos_token_id=_lowerCAmelCase ,pad_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,forced_eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,prefix=_lowerCAmelCase ,vocab_size=_lowerCAmelCase ,**_lowerCAmelCase ,) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowerCamelCase__ = kwargs.pop("""question_encoder""" ) lowerCamelCase__ = question_encoder_config.pop("""model_type""" ) lowerCamelCase__ = kwargs.pop("""generator""" ) lowerCamelCase__ = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = reduce_loss lowerCamelCase__ = label_smoothing lowerCamelCase__ = exclude_bos_score lowerCamelCase__ = do_marginalize lowerCamelCase__ = title_sep lowerCamelCase__ = doc_sep lowerCamelCase__ = n_docs lowerCamelCase__ = max_combined_length lowerCamelCase__ = dataset lowerCamelCase__ = dataset_split lowerCamelCase__ = index_name lowerCamelCase__ = retrieval_vector_size lowerCamelCase__ = retrieval_batch_size lowerCamelCase__ = passages_path lowerCamelCase__ = index_path lowerCamelCase__ = use_dummy_dataset lowerCamelCase__ = output_retrieved lowerCamelCase__ = do_deduplication lowerCamelCase__ = use_cache if self.forced_eos_token_id is None: lowerCamelCase__ = getattr(self.generator ,"""forced_eos_token_id""" ,_lowerCAmelCase ) @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = copy.deepcopy(self.__dict__ ) lowerCamelCase__ = self.question_encoder.to_dict() lowerCamelCase__ = self.generator.to_dict() lowerCamelCase__ = self.__class__.model_type return output
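A usage sketch for the composite config above, assuming a standard transformers install where this class ships as RagConfig and the classmethod shown is exposed as from_question_encoder_generator_configs; the sub-model types are illustrative:

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.for_model("dpr")
generator_config = AutoConfig.for_model("bart")
# Mirrors the classmethod above: cls(question_encoder=..., generator=..., **kwargs)
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
print(rag_config.generator.model_type)  # 'bart'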
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) UpperCamelCase : List[Any] = pytest.mark.integration @pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] ) def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ): inspect_dataset(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = path + """.py""" assert script_name in os.listdir(__lowerCAmelCase ) assert "__pycache__" not in os.listdir(__lowerCAmelCase ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" , ["""accuracy"""] ) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): inspect_metric(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = path + """.py""" assert script_name in os.listdir(__lowerCAmelCase ) assert "__pycache__" not in os.listdir(__lowerCAmelCase ) @pytest.mark.parametrize( """path, config_name, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ): with pytest.raises(__lowerCAmelCase ): get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase ) @pytest.mark.parametrize( """path, expected""" , [ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] , ) def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): lowerCamelCase__ = get_dataset_config_names(__lowerCAmelCase ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" , [ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] , ) def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ): lowerCamelCase__ = get_dataset_infos(__lowerCAmelCase ) assert list(infos.keys() ) == expected_configs lowerCamelCase__ = expected_configs[0] assert expected_config in infos lowerCamelCase__ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" , [ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] , ) def A__ ( __lowerCAmelCase : 
int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ): lowerCamelCase__ = get_dataset_infos(__lowerCAmelCase ) assert expected_config in infos lowerCamelCase__ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" , [ ("""paws""", None, ValueError), ] , ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ): with pytest.raises(__lowerCAmelCase ): get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
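A minimal sketch of the inspection calls these tests exercise (network access to the Hub assumed; the expected values come straight from the parametrizations above):

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("paws"))
# ['labeled_final', 'labeled_swap', 'unlabeled_final']
print(get_dataset_split_names("squad", config_name="plain_text"))
# ['train', 'validation']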
50
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
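A small usage sketch, assuming this class is the GPTBigCodeConfig that newer transformers releases export; it shows the attribute_map aliasing declared above:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
assert config.hidden_size == 768       # resolved via 'hidden_size' -> 'n_embd'
assert config.num_hidden_layers == 12  # resolved via 'num_hidden_layers' -> 'n_layer'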
50
1
'''simple docstring''' def A__ ( __lowerCAmelCase : str ): if n_term == "": return [] lowerCamelCase__ = [] for temp in range(int(__lowerCAmelCase ) ): series.append(F'''1/{temp + 1}''' if series else """1""" ) return series if __name__ == "__main__": UpperCamelCase : Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
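Reading through the renamed assignments (the parameter is consumed as n_term and the accumulator as series), the function emits the series terms as strings; a runnable sketch with those names restored:

def harmonic_series(n_term: str) -> list[str]:
    if n_term == "":
        return []
    series: list[str] = []
    for temp in range(int(n_term)):
        # First term is "1", every later term is "1/(k+1)".
        series.append(f"1/{temp + 1}" if series else "1")
    return series

print(harmonic_series("5"))  # ['1', '1/2', '1/3', '1/4', '1/5']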
50
'''simple docstring''' from PIL import Image def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ): def brightness(__lowerCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
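Algebraically the callback reduces to c + level, since the two 128s cancel; a runnable sketch with the renamed references restored (PIL clamps point results to 0..255):

from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # 128 + level + (c - 128) == c + level: shift every channel value by `level`.
    return img.point(lambda c: 128 + level + (c - 128))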
50
1
'''simple docstring''' from __future__ import annotations from scipy.special import comb # type: ignore class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. lowerCamelCase__ = len(_lowerCAmelCase ) - 1 def UpperCamelCase_ ( self ,_lowerCAmelCase ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowerCamelCase__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree ,_lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_lowerCAmelCase ) ,5 ) == 1 return output_values def UpperCamelCase_ ( self ,_lowerCAmelCase ): assert 0 <= t <= 1, "Time t must be between 0 and 1." lowerCamelCase__ = self.basis_function(_lowerCAmelCase ) lowerCamelCase__ = 0.0 lowerCamelCase__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def UpperCamelCase_ ( self ,_lowerCAmelCase = 0.01 ): from matplotlib import pyplot as plt # type: ignore lowerCamelCase__ = [] # x coordinates of points to plot lowerCamelCase__ = [] # y coordinates of points to plot lowerCamelCase__ = 0.0 while t <= 1: lowerCamelCase__ = self.bezier_curve_function(_lowerCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size lowerCamelCase__ = [i[0] for i in self.list_of_points] lowerCamelCase__ = [i[1] for i in self.list_of_points] plt.plot( _lowerCAmelCase ,_lowerCAmelCase ,color="""blue""" ,label="""Curve of Degree """ + str(self.degree ) ,) plt.scatter(_lowerCAmelCase ,_lowerCAmelCase ,color="""red""" ,label="""Control Points""" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
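A quick usage sketch for the curve class, which the __main__ block exposes as BezierCurve; evaluating the degree-2 example at t = 0.5:

curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
print(curve.basis_function(0.5))         # [0.25, 0.5, 0.25], Bernstein weights summing to 1
print(curve.bezier_curve_function(0.5))  # (3.75, 2.5), the point on the curve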
50
'''simple docstring''' def A__ ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCamelCase : Dict = generate_large_matrix() UpperCamelCase : Any = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A__ ( __lowerCAmelCase : list[list[int]] ): assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid ) assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase__ = (left + right) // 2 lowerCamelCase__ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase__ = mid + 1 else: lowerCamelCase__ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 lowerCamelCase__ = len(grid[0] ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = find_negative_index(grid[i][:bound] ) total += bound return (len(__lowerCAmelCase ) * len(grid[0] )) - total def A__ ( __lowerCAmelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A__ ( __lowerCAmelCase : list[list[int]] ): lowerCamelCase__ = 0 for row in grid: for i, number in enumerate(__lowerCAmelCase ): if number < 0: total += len(__lowerCAmelCase ) - i break return total def A__ ( ): from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase__ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
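A quick consistency check, using the function names the benchmark setup string reveals; every strategy should count 8 negatives in the hand-written grid:

grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(grid) == 8
assert count_negatives_brute_force(grid) == 8
assert count_negatives_brute_force_with_break(grid) == 8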
50
1
'''simple docstring''' from statistics import mean import numpy as np def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ): lowerCamelCase__ = 0 # Number of processes finished lowerCamelCase__ = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. lowerCamelCase__ = [0] * no_of_process # List to include calculation results lowerCamelCase__ = [0] * no_of_process # Sort by arrival time. lowerCamelCase__ = [burst_time[i] for i in np.argsort(__lowerCAmelCase )] lowerCamelCase__ = [process_name[i] for i in np.argsort(__lowerCAmelCase )] arrival_time.sort() while no_of_process > finished_process_count: lowerCamelCase__ = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: lowerCamelCase__ = arrival_time[i] lowerCamelCase__ = 0 # Index showing the location of the process being performed lowerCamelCase__ = 0 # Saves the current response ratio. lowerCamelCase__ = 0 for i in range(0 , __lowerCAmelCase ): if finished_process[i] == 0 and arrival_time[i] <= current_time: lowerCamelCase__ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: lowerCamelCase__ = temp lowerCamelCase__ = i # Calculate the turn around time lowerCamelCase__ = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. lowerCamelCase__ = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ): lowerCamelCase__ = [0] * no_of_process for i in range(0 , __lowerCAmelCase ): lowerCamelCase__ = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": UpperCamelCase : Optional[int] = 5 UpperCamelCase : str = ['A', 'B', 'C', 'D', 'E'] UpperCamelCase : Optional[int] = [1, 2, 3, 4, 5] UpperCamelCase : Tuple = [1, 2, 3, 4, 5] UpperCamelCase : str = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) UpperCamelCase : Dict = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
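The dispatch rule implemented above is Highest Response Ratio Next (HRRN): the ready process with the largest (wait time + burst time) / burst time runs next. A one-line worked instance:

# Process 'A' (arrival 1, burst 1) evaluated at current_time = 3:
wait_time, burst_time = 3 - 1, 1
print((wait_time + burst_time) / burst_time)  # 3.0, the highest ratio is dispatched first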
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
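The version arithmetic above relies on packaging.version; a small sketch of the exact properties it reads:

import packaging.version

v = packaging.version.parse("4.21.0.dev0")
print(v.is_devrelease)                    # True
print(v.base_version)                     # '4.21.0'
print(f"{v.major}.{v.minor + 1}.0.dev0")  # next dev cycle: '4.22.0.dev0'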
50
1
'''simple docstring''' import socket def A__ ( ): lowerCamelCase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) lowerCamelCase__ = socket.gethostname() lowerCamelCase__ = 1_2312 sock.connect((host, port) ) sock.send(b"""Hello server!""" ) with open("""Received_file""" , """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: lowerCamelCase__ = sock.recv(1024 ) if not data: break out_file.write(__lowerCAmelCase ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
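A hypothetical counterpart server, not part of the original file, sketched only to make the wire protocol concrete: read the greeting, stream the file back in 1024-byte chunks, then close so the client's recv loop terminates:

import socket

def serve_file(path: str, port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((socket.gethostname(), port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn, open(path, "rb") as f:
            print(conn.recv(1024))  # the client's b"Hello server!" greeting
            while chunk := f.read(1024):
                conn.sendall(chunk)
    # Closing the connection signals EOF, ending the client's receive loop.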
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
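A usage sketch of the BERT-style pair layout the two methods above produce (checkpoint download assumed; the exact ids depend on the vocabulary):

from transformers import SqueezeBertTokenizerFast

# [CLS] seq_a [SEP] belongs to segment 0, seq_b [SEP] to segment 1.
tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("hello", "world")
print(enc["token_type_ids"])  # e.g. [0, 0, 0, 1, 1]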
50
1
'''simple docstring''' import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') UpperCamelCase : Any = get_tests_dir('fixtures/test_sentencepiece_bpe.model') UpperCamelCase : int = 'pt' if is_torch_available() else 'tf' @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = CamembertTokenizer _UpperCamelCase = CamembertTokenizerFast _UpperCamelCase = True _UpperCamelCase = True def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = CamembertTokenizer(_lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """<pad>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] ,"""<pad>""" ) self.assertEqual(vocab_keys[-1] ,"""<mask>""" ) self.assertEqual(len(_lowerCAmelCase ) ,10_04 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_05 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = CamembertTokenizer(_lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowerCamelCase__ = """I was born in 92000, and this is falsé.""" lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.encode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) # <unk> tokens are not the same for `rust` than for `slow`. 
# Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): if not self.test_rust_tokenizer: return lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_rust_tokenizer() lowerCamelCase__ = """I was born in 92000, and this is falsé.""" lowerCamelCase__ = tokenizer.tokenize(_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self.get_rust_tokenizer() lowerCamelCase__ = tokenizer.encode(_lowerCAmelCase ) lowerCamelCase__ = rust_tokenizer.encode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # fmt: off lowerCamelCase__ = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowerCamelCase__ = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""camembert-base""" ,revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" ,sequences=_lowerCAmelCase ,)
50
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
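An illustration of the '##' marking performed by the fourth helper above (add_sub_symbol in the upstream example script, hypothetically restoring the name): characters inside an LTP-segmented multi-character word receive the WordPiece continuation prefix:

print(add_sub_symbol(["天", "气", "好"], {"天气"}))
# ['天', '##气', '好'], only the continuation character of the word is marked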
50
1
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=3 ,_lowerCAmelCase=10 ,_lowerCAmelCase=[10, 20, 30, 40] ,_lowerCAmelCase=[1, 1, 2, 1] ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = embeddings_size lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_act lowerCamelCase__ = num_labels lowerCamelCase__ = scope lowerCamelCase__ = len(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = self.get_config() return config, pixel_values def UpperCamelCase_ ( self ): return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxRegNetModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = FlaxRegNetForImageClassification(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = FlaxRegNetModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self ): return def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) ,expected_num_stages + 1 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model_class(_lowerCAmelCase ) @jax.jit def model_jitted(_lowerCAmelCase ,**_lowerCAmelCase ): return model(pixel_values=_lowerCAmelCase ,**_lowerCAmelCase ) with self.subTest("""JIT Enabled""" ): lowerCamelCase__ = model_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCamelCase__ = model_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) ,len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(jitted_output.shape ,output.shape ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase 
,return_tensors="""np""" ) lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = (1, 10_00) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
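A minimal sketch of the JIT-parity pattern the test uses, checking that a function returns matching shapes with compilation enabled and disabled:

import jax
import jax.numpy as jnp

@jax.jit
def double(x):
    return x * 2

x = jnp.ones((2, 3))
jitted = double(x)
with jax.disable_jit():
    eager = double(x)  # the jitted function falls back to eager execution here
assert jitted.shape == eager.shape == (2, 3)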
50
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
        Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCAmelCase )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCAmelCase )

    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )


if __name__ == "__main__":
    UpperCamelCase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    parser.add_argument(
        '--show_prediction',
        action='store_true',
    )

    UpperCamelCase : List[str] = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url,
        args.pytorch_dump_folder_path,
        args.push_to_hub,
        args.model_name,
        args.show_prediction,
    )
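# A minimal, hedged sketch of the key-renaming pattern the conversion script
# above relies on: pop every key from the state dict, rewrite it, and
# re-insert the tensor. The RENAMES table, function, and tensor values here
# are illustrative placeholders, not the script's real mapping.
import torch

RENAMES = {"pretrained.model": "dpt.encoder", "scratch": "neck"}


def rename_demo_key(name: str) -> str:
    for old, new in RENAMES.items():
        if old in name:
            name = name.replace(old, new)
    return name


demo_state_dict = {"pretrained.model.layer.0.weight": torch.zeros(2, 2)}
for key in list(demo_state_dict):
    demo_state_dict[rename_demo_key(key)] = demo_state_dict.pop(key)

assert "dpt.encoder.layer.0.weight" in demo_state_dict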
50
1
'''simple docstring'''


def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` by rotating the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of ``nums`` via in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
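# A quick cross-check of the two routines above against itertools; this is an
# illustrative addition (it assumes the `permute`/`permute2` names used in the
# cleaned-up module above), not part of the original file.
from itertools import permutations as iter_permutations

sample = [1, 2, 3]
expected = sorted(map(list, iter_permutations(sample)))
assert sorted(permute(list(sample))) == expected
assert sorted(permute2(list(sample))) == expected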
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class UpperCamelCase__ (a ,a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = StableDiffusionControlNetImgaImgPipeline _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} ) _UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) torch.manual_seed(0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) torch.manual_seed(0 ) lowerCamelCase__ = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_lowerCAmelCase ,set_alpha_to_one=_lowerCAmelCase ,) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) lowerCamelCase__ = CLIPTextModel(_lowerCAmelCase ) lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__ = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith("""mps""" ): lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase ) else: lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowerCamelCase__ = 2 lowerCamelCase__ = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor) ,generator=_lowerCAmelCase ,device=torch.device(_lowerCAmelCase ) ,) lowerCamelCase__ = floats_tensor(control_image.shape ,rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowerCamelCase__ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) lowerCamelCase__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def UpperCamelCase_ ( self ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def UpperCamelCase_ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase_ ( self ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = StableDiffusionControlNetImgaImgPipeline _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} _UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _UpperCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) torch.manual_seed(0 ) def init_weights(_lowerCAmelCase ): if isinstance(_lowerCAmelCase ,torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_lowerCAmelCase ) torch.manual_seed(0 ) lowerCamelCase__ = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_lowerCAmelCase ) torch.manual_seed(0 ) lowerCamelCase__ = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_lowerCAmelCase ,set_alpha_to_one=_lowerCAmelCase ,) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) lowerCamelCase__ = CLIPTextModel(_lowerCAmelCase ) lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__ = MultiControlNetModel([controlneta, controlneta] ) lowerCamelCase__ = { """unet""": 
unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith("""mps""" ): lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase ) else: lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowerCamelCase__ = 2 lowerCamelCase__ = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_lowerCAmelCase ,device=torch.device(_lowerCAmelCase ) ,), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_lowerCAmelCase ,device=torch.device(_lowerCAmelCase ) ,), ] lowerCamelCase__ = floats_tensor(control_image[0].shape ,rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowerCamelCase__ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) lowerCamelCase__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) lowerCamelCase__ = 10.0 lowerCamelCase__ = 4 lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**_lowerCAmelCase )[0] lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**_lowerCAmelCase ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0] lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**_lowerCAmelCase ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0] lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase ) lowerCamelCase__ = steps lowerCamelCase__ = scale lowerCamelCase__ = pipe(**_lowerCAmelCase ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def UpperCamelCase_ ( self ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def UpperCamelCase_ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase_ ( self ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_lowerCAmelCase ) except NotImplementedError: pass @slow @require_torch_gpu class UpperCamelCase__ 
(unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ): lowerCamelCase__ = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,safety_checker=_lowerCAmelCase ,controlnet=_lowerCAmelCase ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCamelCase__ = """evil space-punk bird""" lowerCamelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_12, 5_12) ) lowerCamelCase__ = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_12, 5_12) ) lowerCamelCase__ = pipe( _lowerCAmelCase ,_lowerCAmelCase ,control_image=_lowerCAmelCase ,generator=_lowerCAmelCase ,output_type="""np""" ,num_inference_steps=50 ,strength=0.6 ,) lowerCamelCase__ = output.images[0] assert image.shape == (5_12, 5_12, 3) lowerCamelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
50
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
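# A hedged sketch of the past_key_values dummy inputs built above: one
# (key, value) pair per layer, each of shape
# (batch, num_heads, past_sequence_length, head_dim). The sizes below are
# example values, not taken from a real CodeGen checkpoint.
import torch

batch, seqlen = 2, 7
n_head, n_embd, n_layer = 16, 1024, 28
past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
assert past_key_values[0][0].shape == (2, 16, 9, 64)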
50
1
'''simple docstring'''


def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
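# A hedged O(n^2) cross-check for the Manacher implementation above: expand
# around every center and keep the longest palindrome. Illustrative only, and
# it assumes the `palindromic_string` name used in the cleaned-up module.
def longest_palindrome_bruteforce(s: str) -> str:
    best = s[:1]
    for center in range(len(s)):
        for left, right in ((center, center), (center, center + 1)):
            while left >= 0 and right < len(s) and s[left] == s[right]:
                left -= 1
                right += 1
            if right - left - 1 > len(best):
                best = s[left + 1 : right]
    return best


for word in ("abbbaba", "ababa", "forgeeksskeegfor"):
    assert palindromic_string(word) == longest_palindrome_bruteforce(word)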
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def UpperCamelCase_ ( self ): lowerCamelCase__ = self.dummy_uncond_unet lowerCamelCase__ = PNDMScheduler() lowerCamelCase__ = PNDMPipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) pndm.to(_lowerCAmelCase ) pndm.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pndm(generator=_lowerCAmelCase ,num_inference_steps=20 ,output_type="""numpy""" ).images lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pndm(generator=_lowerCAmelCase ,num_inference_steps=20 ,output_type="""numpy""" ,return_dict=_lowerCAmelCase )[0] lowerCamelCase__ = image[0, -3:, -3:, -1] lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = """google/ddpm-cifar10-32""" lowerCamelCase__ = UNetaDModel.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PNDMScheduler() lowerCamelCase__ = PNDMPipeline(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) pndm.to(_lowerCAmelCase ) pndm.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pndm(generator=_lowerCAmelCase ,output_type="""numpy""" ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase__ = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
50
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved PyTorch state dict to fp16, overwriting it or writing to ``save_path``."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
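# A hedged toy run of the same idea on an in-memory state dict, showing that
# `.half()` halves the per-element storage; the tensor and key are placeholders.
import torch

toy_state_dict = {"weight": torch.randn(4, 4)}
toy_fp16 = {k: v.half() for k, v in toy_state_dict.items()}
assert toy_fp16["weight"].dtype == torch.float16
assert toy_fp16["weight"].element_size() == toy_state_dict["weight"].element_size() // 2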
50
1
'''simple docstring'''


def different_signs(num1: int, num2: int) -> bool:
    """Return True if ``num1`` and ``num2`` have opposite signs, using the
    two's-complement XOR trick.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
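# Why the XOR trick above works, as a hedged illustration: in two's
# complement, the sign bit of a ^ b is set exactly when the sign bits of a
# and b differ, and Python ints behave as if infinitely sign-extended.
for a, b in ((3, -7), (-3, 7), (3, 7), (-3, -7), (0, -1)):
    assert different_signs(a, b) == ((a < 0) != (b < 0))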
50
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
50
1
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n), i.e. the number of
    lattice paths through an n x n grid (Project Euler problem 15)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
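# A hedged cross-check: the closed form above equals math.comb(2n, n), and
# the Project Euler 15 value for a 20x20 grid is well known.
from math import comb

assert solution(20) == comb(40, 20) == 137846528820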
50
'''simple docstring'''


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum set of non-overlapping activities; ``finish``
    must already be sorted in increasing order for the greedy choice to work."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
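# A hedged note in code form: the greedy selection above assumes the
# activities arrive sorted by finish time. If they do not, sort the
# (start, finish) pairs first; the data below is illustrative.
pairs = sorted(zip([3, 1, 5], [4, 2, 9]), key=lambda p: p[1])  # sort by finish time
sorted_start, sorted_finish = map(list, zip(*pairs))
print_max_activities(sorted_start, sorted_finish)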
50
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() # fmt: off lowerCamelCase__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowerCamelCase__ = {"""unk_token""": """<unk>"""} lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_lowerCAmelCase ) ) lowerCamelCase__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowerCamelCase__ = os.path.join(self.tmpdirname ,_lowerCAmelCase ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = self.get_rust_tokenizer() lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_lowerCAmelCase ) lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,_lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = CLIPProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) 
lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
50
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
50
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ): # test for the above condition self.test() def UpperCamelCase_ ( self ): lowerCamelCase__ = 0 lowerCamelCase__ = False while not completed: if counter == 1: self.reset() lowerCamelCase__ = self.advance() if not self.does_advance(_lowerCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.update(_lowerCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def UpperCamelCase_ ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCamelCase_ ( self ,_lowerCAmelCase ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCamelCase_ ( self ,_lowerCAmelCase ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCamelCase_ ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCamelCase_ ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCamelCase_ ( self ,_lowerCAmelCase=False ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ): super(_lowerCAmelCase ,self ).__init__() if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or len(_lowerCAmelCase ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) lowerCamelCase__ = token_ids lowerCamelCase__ = len(self.token_ids ) lowerCamelCase__ = -1 # the index of the currently fulfilled step lowerCamelCase__ = False def UpperCamelCase_ ( self ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCamelCase_ ( self ,_lowerCAmelCase ): if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCamelCase_ ( self ,_lowerCAmelCase ): if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCAmelCase )}''' ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False if self.does_advance(_lowerCAmelCase ): self.fulfilled_idx += 1 lowerCamelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): lowerCamelCase__ = True lowerCamelCase__ = completed else: # failed to make progress. 
lowerCamelCase__ = True self.reset() return stepped, completed, reset def UpperCamelCase_ ( self ): lowerCamelCase__ = False lowerCamelCase__ = 0 def UpperCamelCase_ ( self ): return self.seqlen - (self.fulfilled_idx + 1) def UpperCamelCase_ ( self ,_lowerCAmelCase=False ): lowerCamelCase__ = PhrasalConstraint(self.token_ids ) if stateful: lowerCamelCase__ = self.seqlen lowerCamelCase__ = self.fulfilled_idx lowerCamelCase__ = self.completed return new_constraint class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=True ): lowerCamelCase__ = max([len(_lowerCAmelCase ) for one in nested_token_ids] ) lowerCamelCase__ = {} for token_ids in nested_token_ids: lowerCamelCase__ = root for tidx, token_id in enumerate(_lowerCAmelCase ): if token_id not in level: lowerCamelCase__ = {} lowerCamelCase__ = level[token_id] if no_subsets and self.has_subsets(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" F''' {nested_token_ids}.''' ) lowerCamelCase__ = root def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.trie for current_token in current_seq: lowerCamelCase__ = start[current_token] lowerCamelCase__ = list(start.keys() ) return next_tokens def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = self.next_tokens(_lowerCAmelCase ) return len(_lowerCAmelCase ) == 0 def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = list(root.values() ) if len(_lowerCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_lowerCAmelCase ) for nn in next_nodes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.count_leaves(_lowerCAmelCase ) return len(_lowerCAmelCase ) != leaf_count class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ): super(_lowerCAmelCase ,self ).__init__() if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or len(_lowerCAmelCase ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) lowerCamelCase__ = DisjunctiveTrie(_lowerCAmelCase ) lowerCamelCase__ = nested_token_ids lowerCamelCase__ = self.trie.max_height lowerCamelCase__ = [] lowerCamelCase__ = False def UpperCamelCase_ ( self ): lowerCamelCase__ = self.trie.next_tokens(self.current_seq ) if len(_lowerCAmelCase ) == 0: return None else: return token_list def UpperCamelCase_ ( self ,_lowerCAmelCase ): if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCAmelCase )}''' ) lowerCamelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCamelCase_ ( self ,_lowerCAmelCase ): if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCAmelCase )}''' ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False if 
self.does_advance(_lowerCAmelCase ): self.current_seq.append(_lowerCAmelCase ) lowerCamelCase__ = True else: lowerCamelCase__ = True self.reset() lowerCamelCase__ = self.trie.reached_leaf(self.current_seq ) lowerCamelCase__ = completed return stepped, completed, reset def UpperCamelCase_ ( self ): lowerCamelCase__ = False lowerCamelCase__ = [] def UpperCamelCase_ ( self ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCamelCase_ ( self ,_lowerCAmelCase=False ): lowerCamelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: lowerCamelCase__ = self.seqlen lowerCamelCase__ = self.current_seq lowerCamelCase__ = self.completed return new_constraint class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = constraints # max # of steps required to fulfill a given constraint lowerCamelCase__ = max([c.seqlen for c in constraints] ) lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = False self.init_state() def UpperCamelCase_ ( self ): lowerCamelCase__ = [] lowerCamelCase__ = None lowerCamelCase__ = [constraint.copy(stateful=_lowerCAmelCase ) for constraint in self.constraints] def UpperCamelCase_ ( self ): lowerCamelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCamelCase_ ( self ): lowerCamelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" lowerCamelCase__ = constraint.advance() if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): token_list.append(_lowerCAmelCase ) elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ): token_list.extend(_lowerCAmelCase ) else: lowerCamelCase__ = self.inprogress_constraint.advance() if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): token_list.append(_lowerCAmelCase ) elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ): token_list.extend(_lowerCAmelCase ) if len(_lowerCAmelCase ) == 0: return None else: return token_list def UpperCamelCase_ ( self ,_lowerCAmelCase ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowerCamelCase__ , lowerCamelCase__ = self.add(_lowerCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def UpperCamelCase_ ( self ,_lowerCAmelCase ): if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) lowerCamelCase__ , lowerCamelCase__ = False, False if self.completed: lowerCamelCase__ = True lowerCamelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.inprogress_constraint.update(_lowerCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCAmelCase ) ) lowerCamelCase__ = None if complete: # 2. 
If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) lowerCamelCase__ = None if len(self.pending_constraints ) == 0: # we're done! lowerCamelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = pending_constraint.update(_lowerCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_lowerCAmelCase ) lowerCamelCase__ = None if not complete and stepped: lowerCamelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowerCamelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowerCamelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def UpperCamelCase_ ( self ,_lowerCAmelCase=True ): lowerCamelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowerCamelCase__ = [ constraint.copy(stateful=_lowerCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowerCamelCase__ = self.inprogress_constraint.copy(stateful=_lowerCAmelCase ) lowerCamelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
50
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = [] for line in lines: lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase ) # remove comments if line: filtered_lines.append(__lowerCAmelCase ) lowerCamelCase__ = """\n""".join(__lowerCAmelCase ) # Make a hash from all this code lowerCamelCase__ = full_str.encode("""utf-8""" ) return shaaaa(__lowerCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCamelCase : Dict = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCamelCase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
50
1
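The constraint machinery above is easier to follow in isolation: a disjunctive constraint is a token trie, advance() proposes tokens that make progress, and update() reports (stepped, completed, reset). Below is a minimal standalone sketch of that cycle; the class name TinyDisjunctiveTrie and the token ids are invented for illustration, and this is not the transformers API.

# Minimal sketch of a disjunctive-constraint trie (illustrative names, not the transformers API).
class TinyDisjunctiveTrie:
    def __init__(self, sequences):
        self.root = {}
        for seq in sequences:  # build a trie over the allowed token sequences
            node = self.root
            for tok in seq:
                node = node.setdefault(tok, {})
        self.node = self.root  # current position in the trie

    def advance(self):
        # tokens that would make incremental progress from the current node
        return list(self.node.keys())

    def update(self, token_id):
        stepped = completed = reset = False
        if token_id in self.node:
            self.node = self.node[token_id]
            stepped = True
            completed = not self.node  # reached a leaf: one full sequence matched
        else:
            reset = True
            self.node = self.root  # wrong token: restart progress
        return stepped, completed, reset

trie = TinyDisjunctiveTrie([[5, 6, 7], [5, 8]])  # fulfil either sequence
print(trie.advance())  # [5]
print(trie.update(5))  # (True, False, False)
print(trie.update(8))  # (True, True, False): sequence [5, 8] fulfilled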
'''simple docstring''' def hamming_distance( string_a : str , string_b : str ): if len(string_a ) != len(string_b ): raise ValueError("""String lengths must match!""" ) count = 0 for char_a, char_b in zip(string_a , string_b ): if char_a != char_b: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
50
'''simple docstring''' import operator def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ): lowerCamelCase__ = operator.lt if reverse else operator.gt lowerCamelCase__ = solution or [] if not arr: return solution lowerCamelCase__ = [arr.pop(0 )] for i, item in enumerate(__lowerCAmelCase ): if _operator(__lowerCAmelCase , sublist[-1] ): sublist.append(__lowerCAmelCase ) arr.pop(__lowerCAmelCase ) # merging sublist into solution list if not solution: solution.extend(__lowerCAmelCase ) else: while sublist: lowerCamelCase__ = sublist.pop(0 ) for i, xx in enumerate(__lowerCAmelCase ): if not _operator(__lowerCAmelCase , __lowerCAmelCase ): solution.insert(__lowerCAmelCase , __lowerCAmelCase ) break else: solution.append(__lowerCAmelCase ) strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
1
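A quick usage sketch for the Hamming-distance helper above (inputs must be equal-length strings; the example values are illustrative):

assert hamming_distance("karolin", "kathrin") == 3  # differs at positions 2, 3 and 4
assert hamming_distance("0000", "1111") == 4
try:
    hamming_distance("ab", "abc")
except ValueError as err:
    print(err)  # String lengths must match!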
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) lowerCamelCase__ = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !" lowerCamelCase__ = model(_lowerCAmelCase )["""last_hidden_state"""] lowerCamelCase__ = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape ,_lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
50
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A__ ( __lowerCAmelCase : dict ): return (data["data"], data["target"]) def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ): lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__lowerCAmelCase , __lowerCAmelCase ) # Predict target for test data lowerCamelCase__ = xgb.predict(__lowerCAmelCase ) lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 ) return predictions def A__ ( ): lowerCamelCase__ = fetch_california_housing() lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split( __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 ) lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Error printing print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
50
1
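The regression script above is the standard fit/predict/score loop. Here is a minimal self-contained sketch of the same pattern on synthetic data, assuming xgboost and scikit-learn are installed (hyperparameters and data shapes are illustrative):

import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 8))
y = X @ rng.normal(size=8) + rng.normal(scale=0.1, size=500)  # noisy linear target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(X_train, y_train)
print("MAE:", mean_absolute_error(y_test, model.predict(X_test)))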
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
1
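The top-k tests above assert that everything outside the k highest logits is masked to -inf before sampling. A minimal numpy sketch of that filtering step (illustrative only, not the transformers implementation; note that ties at the threshold are all kept):

import numpy as np

def top_k_filter(scores, top_k, filter_value=-np.inf):
    # the k-th largest value per row becomes the cutoff
    kth = np.sort(scores, axis=-1)[:, -top_k][:, None]
    return np.where(scores < kth, filter_value, scores)

logits = np.array([[0.1, 2.0, -1.0, 3.0], [1.0, 1.0, 0.0, -2.0]])
print(top_k_filter(logits, top_k=2))
# row 0 keeps 2.0 and 3.0; row 1 keeps both 1.0 entries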
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" in checkpoint_url or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2"""
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
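Conversion scripts like the DPT one above keep repeating two moves: renaming state-dict keys with string replacements, and slicing a fused q/k/v projection into separate query, key and value tensors. A minimal torch sketch of the q/k/v split (shapes are illustrative):

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows: [query; key; value]
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[2 * hidden_size :, :]
q_b, k_b, v_b = torch.split(in_proj_bias, hidden_size)

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
assert q_b.shape == (hidden_size,)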
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging UpperCamelCase : int = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=False ): try: import torch # noqa: F401 except ImportError: logger.error( """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise if not is_sharded: lowerCamelCase__ = os.path.abspath(__lowerCAmelCase ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase__ = convert_pytorch_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase ) return flax_state_dict def A__ ( __lowerCAmelCase : Tuple[str] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, jnp.ndarray] , __lowerCAmelCase : str , ): def is_key_or_prefix_key_in_dict(__lowerCAmelCase : Tuple[str] ) -> bool: return len(set(__lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase__ = pt_tuple_key[:-1] + ("""mean""",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase__ = pt_tuple_key[:-1] + ("""var""",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ): lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ): lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase__ = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase__ = pt_tuple_key[-2] + 
"""_g""" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase__ = pt_tuple_key[-2] + """_v""" if name is not None: lowerCamelCase__ = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): # convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase__ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase__ = flax_model.params["""params"""] else: lowerCamelCase__ = flax_model.params lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase__ = flatten_dict(flax_model.params["""batch_stats"""] ) random_flax_state_dict.update(__lowerCAmelCase ) lowerCamelCase__ = {} lowerCamelCase__ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) lowerCamelCase__ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary lowerCamelCase__ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase__ = pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # add model prefix if necessary lowerCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase__ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): import torch # Load the index lowerCamelCase__ = {} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase__ = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase__ = flax_model.params["""params"""] lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) ) else: lowerCamelCase__ = flax_model.params lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) lowerCamelCase__ = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary lowerCamelCase__ = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase__ = pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # add model prefix if necessary lowerCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase__ = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) continue if "var" in flax_key[-1]: lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ): lowerCamelCase__ = os.path.abspath(__lowerCAmelCase ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase__ = getattr(__lowerCAmelCase , """Flax""" + model.__class__.__name__ ) # load flax weight dict with open(__lowerCAmelCase , """rb""" ) as state_f: try: lowerCamelCase__ = from_bytes(__lowerCAmelCase , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple ): try: import torch # noqa: F401 except ImportError: logger.error( """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda __lowerCAmelCase : x.dtype == jnp.bfloataa , __lowerCAmelCase ) ).values() if any(__lowerCAmelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCamelCase__ = jax.tree_util.tree_map( lambda __lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCAmelCase ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = pt_model.state_dict() lowerCamelCase__ = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) lowerCamelCase__ = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase__ = [] lowerCamelCase__ = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase__ = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase__ = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCAmelCase ) not in pt_model_dict: # conv layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ) not in pt_model_dict: # linear layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""running_mean""",) elif "var" in flax_key_tuple[-1]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""running_var""",) if "batch_stats" in flax_state: lowerCamelCase__ = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase__ = """.""".join(__lowerCAmelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. lowerCamelCase__ = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase__ = key.split(""".""" ) lowerCamelCase__ = None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase__ = key_components[-2] + """_g""" elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase__ = key_components[-2] + """_v""" if name is not None: lowerCamelCase__ = key_components[:-3] + [name] lowerCamelCase__ = """.""".join(__lowerCAmelCase ) lowerCamelCase__ = key if flax_key in special_pt_names: lowerCamelCase__ = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase__ = np.asarray(__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , np.ndarray ) else flax_tensor lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ) # remove from missing keys missing_keys.remove(__lowerCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(__lowerCAmelCase ) pt_model.load_state_dict(__lowerCAmelCase ) # re-transform missing_keys to list lowerCamelCase__ = list(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__lowerCAmelCase ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' """ use it for predictions and inference.""" ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' """If your task is similar to the task the model of the checkpoint was trained on, """ F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
50
'''simple docstring''' def different_signs( num_a : int , num_b : int ): return num_a ^ num_b < 0 if __name__ == "__main__": import doctest doctest.testmod()
50
1
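The PyTorch-to-Flax converters above reduce to a few layout rules: an nn.Linear weight of shape (out, in) becomes a Flax kernel of shape (in, out), and a Conv2d weight of shape (out, in, kh, kw) becomes (kh, kw, in, out). A numpy sketch of just those two transposes:

import numpy as np

pt_linear = np.zeros((16, 32))                   # PyTorch nn.Linear: (out_features, in_features)
flax_kernel = pt_linear.T                        # Flax Dense kernel: (in_features, out_features)
assert flax_kernel.shape == (32, 16)

pt_conv = np.zeros((8, 3, 5, 5))                 # PyTorch Conv2d: (out, in, kh, kw)
flax_conv = np.transpose(pt_conv, (2, 3, 1, 0))  # Flax Conv kernel: (kh, kw, in, out)
assert flax_conv.shape == (5, 5, 3, 8)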
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ): lowerCamelCase__ = {} lowerCamelCase__ = {} if prompt is not None: lowerCamelCase__ = prompt if generate_kwargs is not None: lowerCamelCase__ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowerCamelCase__ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowerCamelCase__ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self ,_lowerCAmelCase ,**_lowerCAmelCase ): return super().__call__(_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError( F'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. ''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowerCamelCase__ = self.model.config.model_type if model_type == "git": lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) lowerCamelCase__ = self.tokenizer(text=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ).input_ids lowerCamelCase__ = [self.tokenizer.cls_token_id] + input_ids lowerCamelCase__ = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,header_text=_lowerCAmelCase ,return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F'''Model type {model_type} does not support conditional text generation''' ) else: lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowerCamelCase__ = None return model_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). 
In batch mode, the # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] ,_lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowerCamelCase__ = None if generate_kwargs is None: lowerCamelCase__ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowerCamelCase__ = model_inputs.pop(self.model.main_input_name ) lowerCamelCase__ = self.model.generate(_lowerCAmelCase ,**_lowerCAmelCase ,**_lowerCAmelCase ) return model_outputs def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] for output_ids in model_outputs: lowerCamelCase__ = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,) } records.append(_lowerCAmelCase ) return records
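For context, this class implements the `image-to-text` pipeline task. A hypothetical usage sketch (the checkpoint name is illustrative; any vision-to-text model registered for the task behaves the same way):

```python
from PIL import Image
from transformers import pipeline

# Illustrative GIT checkpoint; downloading it requires hub access.
captioner = pipeline("image-to-text", model="microsoft/git-base-coco")

# The pipeline accepts a URL, a local path, or a PIL.Image; `max_new_tokens`
# is routed into `generate_kwargs` by the parameter-sanitizing logic above.
image = Image.new("RGB", (224, 224), color="white")
print(captioner(image, max_new_tokens=20))  # [{'generated_text': '...'}]
```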
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
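The `token_type_ids` logic above follows the standard BERT pair scheme, `[CLS] A [SEP] B [SEP]`: segment id 0 covers the first sequence including `[CLS]` and its `[SEP]`, and segment id 1 covers the second. A short sketch, assuming hub access to the checkpoint named in the file:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("first sentence", "second one")

# Segment ids: 0 over "[CLS] first sentence [SEP]", 1 over "second one [SEP]".
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])  # e.g. [0, 0, 0, 0, 1, 1, 1]
```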
50
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCamelCase : int = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
50
1
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (EulerDiscreteScheduler,) _UpperCamelCase = 10 def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 11_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_lowerCAmelCase ,beta_end=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase__ = sample.to(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ) lowerCamelCase__ = output.prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase__ = sample.to(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ) lowerCamelCase__ = output.prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ,device=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCamelCase__ = 
sample.to(_lowerCAmelCase ) for t in scheduler.timesteps: lowerCamelCase__ = scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ) lowerCamelCase__ = output.prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ,use_karras_sigmas=_lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ,device=_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCamelCase__ = sample.to(_lowerCAmelCase ) for t in scheduler.timesteps: lowerCamelCase__ = scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ) lowerCamelCase__ = output.prev_sample lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
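The loop these tests exercise is the standard Euler sampling loop: scale the latents for the current sigma, predict noise, then step. A self-contained sketch with a zero "model" standing in for a real UNet (purely illustrative):

```python
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet's output
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
```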
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'gpt_bigcode' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = scale_attn_weights lowerCamelCase__ = use_cache lowerCamelCase__ = attention_softmax_in_fpaa lowerCamelCase__ = scale_attention_softmax_in_fpaa lowerCamelCase__ = multi_query lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
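The `attribute_map` above is what lets the GPT-2-style names (`n_embd`, `n_layer`, `n_head`) coexist with the canonical config attributes. A small sketch using the defaults from `__init__`:

```python
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig()  # n_embd=768, n_layer=12, n_head=12 by default
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12
assert config.num_attention_heads == config.n_head == 12
print(config.multi_query)  # True: multi-query attention is the default
```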
50
1
'''simple docstring''' def A__ ( input_str : str , use_pascal : bool = False ): if not isinstance(input_str , str ): msg = F'''Expected string as input, found {type(input_str )}''' raise ValueError(msg ) if not isinstance(use_pascal , bool ): msg = F'''Expected boolean as use_pascal parameter, found {type(use_pascal )}''' raise ValueError(msg ) words = input_str.split("""_""" ) start_index = 0 if use_pascal else 1 words_to_capitalize = words[start_index:] capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize] initial_word = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
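Usage of the converter above (its name is obfuscated to `A__` in this dataset): the first word stays lowercase for camelCase and is capitalized like the rest for PascalCase.

```python
assert A__("some_random_string") == "someRandomString"
assert A__("some_random_string", use_pascal=True) == "SomeRandomString"
assert A__("one") == "one"
```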
50
'''simple docstring''' from PIL import Image def change_brightness ( img : Image , level : float ): def brightness(c : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(brightness ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 brigt_img = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
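`Image.point` builds a 0–255 lookup table per band from the callable, so the closure effectively adds `level` to every channel value (PIL clamps on conversion). A quick sketch using a synthetic image instead of the lena file from the demo:

```python
from PIL import Image

img = Image.new("RGB", (4, 4), color=(128, 128, 128))
brighter = change_brightness(img, 50)  # the function defined above
print(brighter.getpixel((0, 0)))       # (178, 178, 178)
```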
50
1
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase : Dict = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCamelCase : Union[str, Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCamelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCamelCase : Dict = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCamelCase : List[str] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): for tf_name, hf_name in patterns: lowerCamelCase__ = k.replace(__lowerCAmelCase , __lowerCAmelCase ) return k def A__ ( __lowerCAmelCase : dict , __lowerCAmelCase : dict ): lowerCamelCase__ = BigBirdPegasusConfig(**__lowerCAmelCase ) lowerCamelCase__ = BigBirdPegasusForConditionalGeneration(__lowerCAmelCase ) lowerCamelCase__ = torch_model.state_dict() lowerCamelCase__ = {} # separating decoder weights lowerCamelCase__ = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowerCamelCase__ = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowerCamelCase__ = [k.endswith(__lowerCAmelCase ) for ending in KEYS_TO_IGNORE] if any(__lowerCAmelCase ): continue lowerCamelCase__ = DECODER_PATTERNS lowerCamelCase__ = rename_state_dict_key(__lowerCAmelCase , __lowerCAmelCase ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCamelCase__ = v.T lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowerCamelCase__ = [k.endswith(__lowerCAmelCase ) for ending in KEYS_TO_IGNORE] if any(__lowerCAmelCase ): continue lowerCamelCase__ = REMAINING_PATTERNS lowerCamelCase__ = rename_state_dict_key(__lowerCAmelCase , __lowerCAmelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCamelCase__ = v.T lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' lowerCamelCase__ = mapping["""model.embed_positions.weight"""] lowerCamelCase__ = mapping.pop("""model.embed_positions.weight""" ) lowerCamelCase__ , lowerCamelCase__ = torch_model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = tf.train.list_variables(__lowerCAmelCase ) lowerCamelCase__ = {} lowerCamelCase__ = ["""global_step"""] for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ): lowerCamelCase__ = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCamelCase__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = array return tf_weights def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict ): lowerCamelCase__ = get_tf_weights_as_numpy(__lowerCAmelCase ) lowerCamelCase__ = convert_bigbird_pegasus(__lowerCAmelCase , __lowerCAmelCase ) torch_model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Union[str, Any] = parser.parse_args() UpperCamelCase : List[Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
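The TF-to-HF key conversion is plain sequential pattern substitution, which makes it easy to sanity-check in isolation. A minimal re-statement with a representative subset of the decoder patterns (the sample key is illustrative):

```python
SAMPLE_PATTERNS = [
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("pegasus", "model"),
    ("attention.self", "self_attn"),
    ("query", "q_proj"),
]

def rename_key(k: str, patterns: list) -> str:
    # Same logic as rename_state_dict_key above: apply each pair in order.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

tf_key = "pegasus/decoder/layer_0/attention/self/query/kernel"
print(rename_key(tf_key, SAMPLE_PATTERNS))
# model.decoder.layers.0.self_attn.q_proj.weight
```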
50
'''simple docstring''' def generate_large_matrix( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] grid = generate_large_matrix() test_grids = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def validate_grid( grid : list[list[int]] ): assert all(row == sorted(row , reverse=True ) for row in grid ) assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) ) def find_negative_index( array : list[int] ): left = 0 right = len(array ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: mid = (left + right) // 2 num = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: left = mid + 1 else: right = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(array ) def count_negatives_binary_search( grid : list[list[int]] ): total = 0 bound = len(grid[0] ) for i in range(len(grid ) ): bound = find_negative_index(grid[i][:bound] ) total += bound return (len(grid ) * len(grid[0] )) - total def count_negatives_brute_force( grid : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def count_negatives_brute_force_with_break( grid : list[list[int]] ): total = 0 for row in grid: for i, number in enumerate(row ): if number < 0: total += len(row ) - i break return total def benchmark( ): from timeit import timeit print("""Running benchmarks""" ) setup = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
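The binary-search variant wins because each row is resolved in O(log n). Since every row is sorted in decreasing order, the per-row negative count can also be cross-checked against the standard library, as in this sketch:

```python
from bisect import bisect_left

def count_negatives_bisect(grid: list[list[int]]) -> int:
    # A reversed row is increasing, so bisect_left(..., 0) is the number
    # of strictly negative entries in that row.
    return sum(bisect_left(row[::-1], 0) for row in grid)

sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_bisect(sample))  # 8
```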
50
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Optional[int] = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'speech_to_text' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self ,_lowerCAmelCase=1_00_00 ,_lowerCAmelCase=12 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=6 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=2 ,_lowerCAmelCase=60_00 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=2 ,_lowerCAmelCase=(5, 5) ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=80 ,_lowerCAmelCase=1 ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = d_model lowerCamelCase__ = encoder_ffn_dim lowerCamelCase__ = encoder_layers lowerCamelCase__ = encoder_attention_heads lowerCamelCase__ = decoder_ffn_dim lowerCamelCase__ = decoder_layers lowerCamelCase__ = decoder_attention_heads lowerCamelCase__ = dropout lowerCamelCase__ = attention_dropout lowerCamelCase__ = activation_dropout lowerCamelCase__ = activation_function lowerCamelCase__ = init_std lowerCamelCase__ = encoder_layerdrop lowerCamelCase__ = decoder_layerdrop lowerCamelCase__ = use_cache lowerCamelCase__ = encoder_layers lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase__ = max_source_positions lowerCamelCase__ = max_target_positions lowerCamelCase__ = num_conv_layers lowerCamelCase__ = list(_lowerCAmelCase ) lowerCamelCase__ = conv_channels lowerCamelCase__ = input_feat_per_channel lowerCamelCase__ = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,)
50
'''simple docstring''' import argparse import os import re import packaging.version UpperCamelCase : List[Any] = 'examples/' UpperCamelCase : int = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCamelCase : Any = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } UpperCamelCase : Any = 'README.md' def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern] lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase ) lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): for folder, directories, fnames in os.walk(__lowerCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" ) def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not patch: update_version_in_examples(__lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = """🤗 Transformers currently provides the following architectures""" lowerCamelCase__ = """1. Want to contribute a new model?""" with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Find the start of the list. lowerCamelCase__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase__ = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase__ = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) def A__ ( ): with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase__ = f.read() lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0] return packaging.version.parse(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Union[str, Any]=False ): lowerCamelCase__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase__ = default_version.base_version elif patch: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = default_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def A__ ( ): lowerCamelCase__ = get_version() lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCamelCase__ = current_version.base_version # Check with the user we got that right. lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCAmelCase ) == 0: lowerCamelCase__ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__lowerCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCamelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
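Everything in the release script hinges on the regex table; a quick sketch of the `init` pattern rewriting a version string (the version numbers are illustrative):

```python
import re

# Same pattern/replacement pair as REPLACE_PATTERNS["init"] above.
re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replace = '__version__ = "VERSION"\n'.replace("VERSION", "4.31.0")

code = '__version__ = "4.31.0.dev0"\n'
print(re_pattern.sub(replace, code))  # __version__ = "4.31.0"
```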
50
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = ['image_processor', 'tokenizer'] _UpperCamelCase = 'ViTImageProcessor' _UpperCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): lowerCamelCase__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_lowerCAmelCase ,) lowerCamelCase__ = kwargs.pop("""feature_extractor""" ) lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def __call__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): if text is None and visual_prompt is None and images is None: raise ValueError("""You have to specify either text, visual prompt or images.""" ) if text is not None and visual_prompt is not None: raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" ) if text is not None: lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) if visual_prompt is not None: lowerCamelCase__ = self.image_processor(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) if images is not None: lowerCamelCase__ = self.image_processor(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) if visual_prompt is not None and images is not None: lowerCamelCase__ = { """pixel_values""": image_features.pixel_values, """conditional_pixel_values""": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCamelCase__ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCamelCase__ = { """conditional_pixel_values""": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) ,tensor_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase ,**_lowerCAmelCase ) @property def UpperCamelCase_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_lowerCAmelCase ,) return self.image_processor_class @property def UpperCamelCase_ ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_lowerCAmelCase ,) return self.image_processor
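The processor multiplexes three prompt styles: text, a visual prompt image, or images alone. A hedged usage sketch, assuming this is the CLIPSeg processor (the checkpoint name is the commonly used public one and is illustrative):

```python
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))
prompt_image = Image.new("RGB", (352, 352))

# Text + image -> input_ids, attention_mask, pixel_values
enc = processor(text=["a glass"], images=image, return_tensors="pt")

# Visual prompt + image -> pixel_values, conditional_pixel_values
enc = processor(visual_prompt=prompt_image, images=image, return_tensors="pt")
# Passing both text and visual_prompt raises a ValueError (see __call__).
```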
50
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase : int = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } UpperCamelCase : Tuple = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } UpperCamelCase : Dict = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = SqueezeBertTokenizer def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,): super().__init__( _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = strip_accents lowerCamelCase__ = tokenize_chinese_chars lowerCamelCase__ = normalizer_class(**_lowerCAmelCase ) lowerCamelCase__ = do_lower_case def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * 
[0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
50
1
'''simple docstring''' def set_bit ( number : int , position : int ): return number | (1 << position) def clear_bit ( number : int , position : int ): return number & ~(1 << position) def flip_bit ( number : int , position : int ): return number ^ (1 << position) def is_bit_set ( number : int , position : int ): return ((number >> position) & 1) == 1 def get_bit ( number : int , position : int ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
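Quick checks for the helpers above (`0b1101` is 13):

```python
assert set_bit(0b1101, 1) == 0b1111    # turn bit 1 on
assert clear_bit(0b1101, 2) == 0b1001  # turn bit 2 off
assert flip_bit(0b1101, 0) == 0b1100   # toggle bit 0
assert is_bit_set(0b1101, 3) is True   # bit 3 is on
assert get_bit(0b1101, 1) == 0         # bit 1 is off
```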
50
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
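The core of the script is `add_sub_symbol`, which re-marks WordPiece continuations of LTP-segmented words with `##` so whole-word masking can group them. A simplified, self-contained re-statement of the idea (the function name and greedy matching here are a sketch, not the exact implementation above):

```python
def mark_subwords(chars: list[str], words: set[str]) -> list[str]:
    # Greedily match the longest known segmented word and prefix every
    # character after its first one with "##".
    max_len = max((len(w) for w in words), default=1)
    out, start = list(chars), 0
    while start < len(out):
        for i in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                break
        else:
            start += 1
    return out

print(mark_subwords(list("我喜欢北京"), {"喜欢", "北京"}))
# ['我', '喜', '##欢', '北', '##京']
```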
50
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : str = '▁' UpperCamelCase : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} UpperCamelCase : Optional[Any] = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } UpperCamelCase : str = {'vinai/bartpho-syllable': 10_24} class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = vocab_file lowerCamelCase__ = monolingual_vocab_file lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowerCamelCase__ = {} lowerCamelCase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids: lowerCamelCase__ = cnt cnt += 1 with open(_lowerCAmelCase ,"""r""" ,encoding="""utf-8""" ) as f: for line in f.readlines(): lowerCamelCase__ = line.strip().split()[0] lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids: lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None lowerCamelCase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None 
,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_ids_to_tokens ) def UpperCamelCase_ ( self ): lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.fairseq_ids_to_tokens[index] def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase ,""" """ ).strip() return out_string def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ,) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'''{str(_lowerCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
50
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)


def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )

    if "large" in checkpoint_url:
        lowerCamelCase__ = 1024
        lowerCamelCase__ = 4096
        lowerCamelCase__ = 24
        lowerCamelCase__ = 16
        lowerCamelCase__ = [5, 11, 17, 23]
        lowerCamelCase__ = [256, 512, 1024, 1024]
        lowerCamelCase__ = (1, 384, 384)

    if "nyu" or "midas" in checkpoint_url:
        lowerCamelCase__ = 768
        lowerCamelCase__ = [1, 1, 1, 0.5]
        lowerCamelCase__ = [256, 512, 768, 768]
        lowerCamelCase__ = 150
        lowerCamelCase__ = 16
        lowerCamelCase__ = (1, 384, 384)
        lowerCamelCase__ = False
        lowerCamelCase__ = """project"""

    if "ade" in checkpoint_url:
        lowerCamelCase__ = True
        lowerCamelCase__ = 768
        lowerCamelCase__ = [1, 1, 1, 0.5]
        lowerCamelCase__ = 150
        lowerCamelCase__ = 16
        lowerCamelCase__ = """huggingface/label-files"""
        lowerCamelCase__ = """ade20k-id2label.json"""
        lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
        lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
        lowerCamelCase__ = idalabel
        lowerCamelCase__ = {v: k for k, v in idalabel.items()}
        lowerCamelCase__ = [1, 150, 480, 480]

    return config, expected_shape


def A__ ( __lowerCAmelCase : Optional[int] ):
    lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : List[Any] ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
    if "pos_embed" in name:
        lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        lowerCamelCase__ = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name and "backbone" not in name:
        lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name and "backbone" not in name:
        lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        lowerCamelCase__ = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    if "backbone" in name:
        lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
    if ".." in name:
        lowerCamelCase__ = name.replace("""..""" , """.""" )
    if "stem.conv" in name:
        lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
    if "convolution" in name and "backbone" in name:
        lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
    if "layer" in name and "backbone" in name:
        lowerCamelCase__ = name.replace("""layer""" , """layers""" )
    if "backbone.bit.encoder.bit" in name:
        lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
    if "embedder.conv" in name:
        lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
    if "backbone.bit.encoder.stem.norm" in name:
        lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
    return name


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
        lowerCamelCase__ = in_proj_bias[: config.hidden_size]
        lowerCamelCase__ = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        lowerCamelCase__ = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        lowerCamelCase__ = in_proj_weight[-config.hidden_size :, :]
        lowerCamelCase__ = in_proj_bias[-config.hidden_size :]


def A__ ( ):
    lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
    lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(__lowerCAmelCase )
    # rename keys
    for key in state_dict.copy().keys():
        lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
        lowerCamelCase__ = val
    # read in qkv matrices
    read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )

    # load HuggingFace model
    lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
    model.load_state_dict(__lowerCAmelCase )
    model.eval()

    # Check outputs on an image
    lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
    lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )

    lowerCamelCase__ = prepare_img()
    lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )

    # forward pass
    lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth

    if show_prediction:
        lowerCamelCase__ = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) ,
                size=(image.size[1], image.size[0]) ,
                mode="""bicubic""" ,
                align_corners=__lowerCAmelCase ,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255 ).show()

    if pytorch_dump_folder_path is not None:
        Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCAmelCase )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCAmelCase )

    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )


if __name__ == "__main__":
    UpperCamelCase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    parser.add_argument(
        '--show_prediction',
        action='store_true',
    )

    UpperCamelCase : List[str] = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
50
1
'''simple docstring'''
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version('>=', FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


UpperCamelCase : Union[str, Any] = get_logger(__name__)


def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=0 ):
    os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    with FSDP.state_dict_type(
        __lowerCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config
    ):
        lowerCamelCase__ = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            lowerCamelCase__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            if accelerator.process_index == 0:
                logger.info(F'''Saving model to {output_model_file}''' )
                torch.save(__lowerCAmelCase , __lowerCAmelCase )
                logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            lowerCamelCase__ = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            logger.info(F'''Saving model to {output_model_file}''' )
            torch.save(__lowerCAmelCase , __lowerCAmelCase )
            logger.info(F'''Model saved to {output_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
            os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
            logger.info(F'''Saving model to {ckpt_dir}''' )
            lowerCamelCase__ = {"""model""": state_dict}
            dist_cp.save_state_dict(
                state_dict=__lowerCAmelCase ,
                storage_writer=dist_cp.FileSystemWriter(__lowerCAmelCase ) ,
                planner=DefaultSavePlanner() ,
            )
            logger.info(F'''Model saved to {ckpt_dir}''' )


def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Dict=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        __lowerCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(__lowerCAmelCase ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
                        """initializing FSDP object"""
                    )
                return
            lowerCamelCase__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            logger.info(F'''Loading model from {input_model_file}''' )
            lowerCamelCase__ = torch.load(__lowerCAmelCase )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            lowerCamelCase__ = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            logger.info(F'''Loading model from {input_model_file}''' )
            lowerCamelCase__ = torch.load(__lowerCAmelCase )
            logger.info(F'''Model loaded from {input_model_file}''' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            lowerCamelCase__ = (
                os.path.join(__lowerCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
                if F'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading model from {ckpt_dir}''' )
            lowerCamelCase__ = {"""model""": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=__lowerCAmelCase ,
                storage_reader=dist_cp.FileSystemReader(__lowerCAmelCase ) ,
                planner=DefaultLoadPlanner() ,
            )
            lowerCamelCase__ = state_dict["""model"""]
            logger.info(F'''Model loaded from {ckpt_dir}''' )
        model.load_state_dict(__lowerCAmelCase )


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int=0 ):
    os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    with FSDP.state_dict_type(
        __lowerCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config
    ):
        lowerCamelCase__ = FSDP.optim_state_dict(__lowerCAmelCase , __lowerCAmelCase )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                lowerCamelCase__ = (
                    F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
                torch.save(__lowerCAmelCase , __lowerCAmelCase )
                logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
        else:
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
            os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
            logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
            dist_cp.save_state_dict(
                state_dict={"""optimizer""": optim_state} ,
                storage_writer=dist_cp.FileSystemWriter(__lowerCAmelCase ) ,
                planner=DefaultSavePlanner() ,
            )
            logger.info(F'''Optimizer state saved in {ckpt_dir}''' )


def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        __lowerCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            lowerCamelCase__ = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            lowerCamelCase__ = (
                F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
            lowerCamelCase__ = torch.load(__lowerCAmelCase )
            logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
        else:
            lowerCamelCase__ = (
                os.path.join(__lowerCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
                if F'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
            lowerCamelCase__ = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() ,
                optimizer_key="""optimizer""" ,
                storage_reader=dist_cp.FileSystemReader(__lowerCAmelCase ) ,
            )
            lowerCamelCase__ = optim_state["""optimizer"""]
            logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
        lowerCamelCase__ = FSDP.optim_state_dict_to_load(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        optimizer.load_state_dict(__lowerCAmelCase )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class UpperCamelCase__ (a ):
    '''simple docstring'''
    _UpperCamelCase = 'philschmid/bart-large-cnn-samsum'
    _UpperCamelCase = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    _UpperCamelCase = 'summarizer'
    _UpperCamelCase = AutoTokenizer
    _UpperCamelCase = AutoModelForSeqaSeqLM

    _UpperCamelCase = ['text']
    _UpperCamelCase = ['text']

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        return self.pre_processor(_lowerCAmelCase ,return_tensors="""pt""" ,truncation=_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        return self.model.generate(**_lowerCAmelCase )[0]

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        return self.pre_processor.decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,clean_up_tokenization_spaces=_lowerCAmelCase )
50
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : Dict = {
    'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
    'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
    'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
    'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
    'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
    'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
    'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
    'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
    'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
    'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
    'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
    'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''
    _UpperCamelCase = 'codegen'
    _UpperCamelCase = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = n_ctx
        lowerCamelCase__ = n_positions
        lowerCamelCase__ = n_embd
        lowerCamelCase__ = n_layer
        lowerCamelCase__ = n_head
        lowerCamelCase__ = n_inner
        lowerCamelCase__ = rotary_dim
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = resid_pdrop
        lowerCamelCase__ = embd_pdrop
        lowerCamelCase__ = attn_pdrop
        lowerCamelCase__ = layer_norm_epsilon
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = use_cache

        lowerCamelCase__ = bos_token_id
        lowerCamelCase__ = eos_token_id

        super().__init__(
            bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase
        )


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
        super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
        if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
            # TODO: how to do that better?
            lowerCamelCase__ = 0

    @property
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
            lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            lowerCamelCase__ = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def UpperCamelCase_ ( self ):
        return self._config.n_layer

    @property
    def UpperCamelCase_ ( self ):
        return self._config.n_head

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
        lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs(
            _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase
        )

        # We need to order the input in the way they appears in the forward()
        lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                lowerCamelCase__ = seqlen + 2
                lowerCamelCase__ = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                lowerCamelCase__ = [
                    (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
                ]

        lowerCamelCase__ = common_inputs["""attention_mask"""]
        if self.use_past:
            lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
            lowerCamelCase__ = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1
            )

        return ordered_inputs

    @property
    def UpperCamelCase_ ( self ):
        return 13
50
1
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase : str = logging.get_logger(__name__)


def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=False ):
    lowerCamelCase__ = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append(
            (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''')
        )
        rename_keys.append(
            (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''')
        )
        rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append(
            (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''')
        )
        rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
            (
                """text_embeddings.position_embeddings.weight""",
                """vilt.embeddings.text_embeddings.position_embeddings.weight""",
            ),
            ("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
            (
                """text_embeddings.token_type_embeddings.weight""",
                """vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
            ),
            ("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
            ("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
            # patch embeddings
            ("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
            ("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
            ("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
            ("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
            # token type embeddings
            ("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("""transformer.norm.weight""", """vilt.layernorm.weight"""),
            ("""transformer.norm.bias""", """vilt.layernorm.bias"""),
            ("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
            ("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("""vqa_classifier.0.weight""", """classifier.0.weight"""),
                ("""vqa_classifier.0.bias""", """classifier.0.bias"""),
                ("""vqa_classifier.1.weight""", """classifier.1.weight"""),
                ("""vqa_classifier.1.bias""", """classifier.1.bias"""),
                ("""vqa_classifier.3.weight""", """classifier.3.weight"""),
                ("""vqa_classifier.3.bias""", """classifier.3.bias"""),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
                ("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
                ("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
                ("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
                ("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
                ("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
            ]
        )
    else:
        pass

    return rename_keys


def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
    for i in range(config.num_hidden_layers ):
        lowerCamelCase__ = """vilt."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCamelCase__ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
        lowerCamelCase__ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
        lowerCamelCase__ = in_proj_bias[: config.hidden_size]
        lowerCamelCase__ = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        lowerCamelCase__ = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        lowerCamelCase__ = in_proj_weight[-config.hidden_size :, :]
        lowerCamelCase__ = in_proj_bias[-config.hidden_size :]


def A__ ( __lowerCAmelCase : Optional[Any] ):
    lowerCamelCase__ = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = dct.pop(__lowerCAmelCase )
    lowerCamelCase__ = val


@torch.no_grad()
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] ):
    lowerCamelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__lowerCAmelCase )
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    if "vqa" in checkpoint_url:
        lowerCamelCase__ = True
        lowerCamelCase__ = 3129
        lowerCamelCase__ = """huggingface/label-files"""
        lowerCamelCase__ = """vqa2-id2label.json"""
        lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
        lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
        lowerCamelCase__ = idalabel
        lowerCamelCase__ = {v: k for k, v in idalabel.items()}
        lowerCamelCase__ = ViltForQuestionAnswering(__lowerCAmelCase )
    elif "nlvr" in checkpoint_url:
        lowerCamelCase__ = True
        lowerCamelCase__ = 2
        lowerCamelCase__ = {0: """False""", 1: """True"""}
        lowerCamelCase__ = {v: k for k, v in config.idalabel.items()}
        lowerCamelCase__ = 3
        lowerCamelCase__ = ViltForImagesAndTextClassification(__lowerCAmelCase )
    elif "irtr" in checkpoint_url:
        lowerCamelCase__ = True
        lowerCamelCase__ = ViltForImageAndTextRetrieval(__lowerCAmelCase )
    elif "mlm_itm" in checkpoint_url:
        lowerCamelCase__ = True
        lowerCamelCase__ = ViltForMaskedLM(__lowerCAmelCase )
    else:
        raise ValueError("""Unknown model type""" )

    # load state_dict of original model, remove and rename some keys
    lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )["""state_dict"""]
    lowerCamelCase__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
    if mlm_model or irtr_model:
        lowerCamelCase__ = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
        for k in ignore_keys:
            state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        lowerCamelCase__ , lowerCamelCase__ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(__lowerCAmelCase )

    # Define processor
    lowerCamelCase__ = ViltImageProcessor(size=384 )
    lowerCamelCase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
    lowerCamelCase__ = ViltProcessor(__lowerCAmelCase , __lowerCAmelCase )

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        lowerCamelCase__ = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__lowerCAmelCase ).raw )
        lowerCamelCase__ = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__lowerCAmelCase ).raw )
        lowerCamelCase__ = (
            """The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
            """ standing."""
        )
        lowerCamelCase__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="""pt""" )
        lowerCamelCase__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="""pt""" )
        lowerCamelCase__ = model(
            input_ids=encoding_a.input_ids ,
            pixel_values=encoding_a.pixel_values ,
            pixel_values_a=encoding_a.pixel_values ,
        )
    else:
        lowerCamelCase__ = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=__lowerCAmelCase ).raw )
        if mlm_model:
            lowerCamelCase__ = """a bunch of [MASK] laying on a [MASK]."""
        else:
            lowerCamelCase__ = """How many cats are there?"""
        lowerCamelCase__ = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="""pt""" )
        lowerCamelCase__ = model(**__lowerCAmelCase )

    # Verify outputs
    if mlm_model:
        lowerCamelCase__ = torch.Size([1, 11, 3_0522] )
        lowerCamelCase__ = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowerCAmelCase , atol=1e-4 )

        # verify masked token prediction equals "cats"
        lowerCamelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        lowerCamelCase__ = torch.Size([1, 3129] )
        lowerCamelCase__ = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowerCAmelCase , atol=1e-4 )

        # verify vqa prediction equals "2"
        lowerCamelCase__ = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        lowerCamelCase__ = torch.Size([1, 2] )
        lowerCamelCase__ = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
        assert outputs.logits.shape == expected_shape

    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCAmelCase )
    processor.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    UpperCamelCase : Union[str, Any] = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
50
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase : int = {
    'configuration_xmod': [
        'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XmodConfig',
        'XmodOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Tuple = [
        'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XmodForCausalLM',
        'XmodForMaskedLM',
        'XmodForMultipleChoice',
        'XmodForQuestionAnswering',
        'XmodForSequenceClassification',
        'XmodForTokenClassification',
        'XmodModel',
        'XmodPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
    return int((input_a, input_a).count(1 ) != 0 )


def A__ ( ):
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
50
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(__lowerCAmelCase , torch.Tensor ):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
        lowerCamelCase__ = v.half()
    if save_path is None:  # overwrite src_path
        lowerCamelCase__ = src_path
    torch.save(__lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    fire.Fire(convert)
50
1
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


UpperCamelCase : int = datasets.utils.logging.get_logger(__name__)

UpperCamelCase : List[str] = ['names', 'prefix']
UpperCamelCase : List[str] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
UpperCamelCase : Optional[int] = ['encoding_errors', 'on_bad_lines']
UpperCamelCase : Optional[int] = ['date_format']


@dataclass
class UpperCamelCase__ (datasets.BuilderConfig ):
    '''simple docstring'''
    _UpperCamelCase = ","
    _UpperCamelCase = None
    _UpperCamelCase = "infer"
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = True
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = False
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = True
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = True
    _UpperCamelCase = None
    _UpperCamelCase = "."
    _UpperCamelCase = None
    _UpperCamelCase = '"'
    _UpperCamelCase = 0
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = True
    _UpperCamelCase = True
    _UpperCamelCase = 0
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = None
    _UpperCamelCase = 10000
    _UpperCamelCase = None
    _UpperCamelCase = "strict"
    _UpperCamelCase = "error"
    _UpperCamelCase = None

    def UpperCamelCase_ ( self ):
        if self.delimiter is not None:
            lowerCamelCase__ = self.delimiter
        if self.column_names is not None:
            lowerCamelCase__ = self.column_names

    @property
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = {
            """sep""": self.sep,
            """header""": self.header,
            """names""": self.names,
            """index_col""": self.index_col,
            """usecols""": self.usecols,
            """prefix""": self.prefix,
            """mangle_dupe_cols""": self.mangle_dupe_cols,
            """engine""": self.engine,
            """converters""": self.converters,
            """true_values""": self.true_values,
            """false_values""": self.false_values,
            """skipinitialspace""": self.skipinitialspace,
            """skiprows""": self.skiprows,
            """nrows""": self.nrows,
            """na_values""": self.na_values,
            """keep_default_na""": self.keep_default_na,
            """na_filter""": self.na_filter,
            """verbose""": self.verbose,
            """skip_blank_lines""": self.skip_blank_lines,
            """thousands""": self.thousands,
            """decimal""": self.decimal,
            """lineterminator""": self.lineterminator,
            """quotechar""": self.quotechar,
            """quoting""": self.quoting,
            """escapechar""": self.escapechar,
            """comment""": self.comment,
            """encoding""": self.encoding,
            """dialect""": self.dialect,
            """error_bad_lines""": self.error_bad_lines,
            """warn_bad_lines""": self.warn_bad_lines,
            """skipfooter""": self.skipfooter,
            """doublequote""": self.doublequote,
            """memory_map""": self.memory_map,
            """float_precision""": self.float_precision,
            """chunksize""": self.chunksize,
            """encoding_errors""": self.encoding_errors,
            """on_bad_lines""": self.on_bad_lines,
            """date_format""": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,_lowerCAmelCase ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class UpperCamelCase__ (datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    _UpperCamelCase = CsvConfig

    def UpperCamelCase_ ( self ):
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        lowerCamelCase__ = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_lowerCAmelCase ,(str, list, tuple) ):
            lowerCamelCase__ = data_files
            if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
                lowerCamelCase__ = [files]
            lowerCamelCase__ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
        lowerCamelCase__ = []
        for split_name, files in data_files.items():
            if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
                lowerCamelCase__ = [files]
            lowerCamelCase__ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=_lowerCAmelCase ,gen_kwargs={"""files""": files} ) )
        return splits

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if self.config.features is not None:
            lowerCamelCase__ = self.config.features.arrow_schema
            if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ):
                # cheaper cast
                lowerCamelCase__ = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=_lowerCAmelCase )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                lowerCamelCase__ = table_cast(_lowerCAmelCase ,_lowerCAmelCase )
        return pa_table

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        lowerCamelCase__ = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object
                for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
            lowerCamelCase__ = pd.read_csv(_lowerCAmelCase ,iterator=_lowerCAmelCase ,dtype=_lowerCAmelCase ,**self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(_lowerCAmelCase ):
                    lowerCamelCase__ = pa.Table.from_pandas(_lowerCAmelCase )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(_lowerCAmelCase )}: {e}''' )
                raise
50
'''simple docstring'''
import os
from pathlib import Path


def A__ ( ):
    from torch.utils.cpp_extension import load

    lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    lowerCamelCase__ = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]

    load(
        """MultiScaleDeformableAttention""" ,
        __lowerCAmelCase ,
        with_cuda=__lowerCAmelCase ,
        extra_include_paths=[str(__lowerCAmelCase )] ,
        extra_cflags=["""-DWITH_CUDA=1"""] ,
        extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ] ,
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
50
1
'''simple docstring'''
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=2 ,_lowerCAmelCase=24 ,_lowerCAmelCase=16 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=None ,_lowerCAmelCase=2 ,_lowerCAmelCase=2 ,):
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = max_length
        lowerCamelCase__ = num_mel_bins
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = scope
        lowerCamelCase__ = frequency_stride
        lowerCamelCase__ = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        lowerCamelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        lowerCamelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1
        lowerCamelCase__ = frequency_out_dimension * time_out_dimension
        lowerCamelCase__ = num_patches + 2

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )

        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )

        lowerCamelCase__ = self.get_config()

        return config, input_values, labels

    def UpperCamelCase_ ( self ):
        return ASTConfig(
            patch_size=self.patch_size ,
            max_length=self.max_length ,
            num_mel_bins=self.num_mel_bins ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_lowerCAmelCase ,
            initializer_range=self.initializer_range ,
            frequency_stride=self.frequency_stride ,
            time_stride=self.time_stride ,
        )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = ASTModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        lowerCamelCase__ = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs
        lowerCamelCase__ = {"""input_values""": input_values}
        return config, inputs_dict


@require_torch
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
    '''simple docstring'''
    _UpperCamelCase = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    _UpperCamelCase = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = ASTModelTester(self )
        lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 )

    def UpperCamelCase_ ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""AST does not use inputs_embeds""" )
    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            lowerCamelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_lowerCAmelCase )
            lowerCamelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ = [*signature.parameters.keys()]

            lowerCamelCase__ = ["""input_values"""]
            self.assertListEqual(arg_names[:1] ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    @slow
    def UpperCamelCase_ ( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = ASTModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = hf_hub_download(
        repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset"""
    )
    lowerCamelCase__ , lowerCamelCase__ = torchaudio.load(__lowerCAmelCase )

    return audio, sampling_rate


@require_torch
@require_torchaudio
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def UpperCamelCase_ ( self ):
        return (
            ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
            if is_torchaudio_available()
            else None
        )

    @slow
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.default_feature_extractor
        lowerCamelCase__ = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_lowerCAmelCase )

        lowerCamelCase__ = self.default_feature_extractor
        lowerCamelCase__ , lowerCamelCase__ = prepare_audio()
        lowerCamelCase__ = audio.squeeze().numpy()
        lowerCamelCase__ = feature_extractor(_lowerCAmelCase ,sampling_rate=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase )

        # forward pass
        with torch.no_grad():
            lowerCamelCase__ = model(**_lowerCAmelCase )

        # verify the logits
        lowerCamelCase__ = torch.Size((1, 5_27) )
        self.assertEqual(outputs.logits.shape ,_lowerCAmelCase )

        lowerCamelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) )
50
'''simple docstring'''
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = len(__lowerCAmelCase )
    print("""The following activities are selected:""" )

    # The first activity is always selected
    lowerCamelCase__ = 0
    print(__lowerCAmelCase , end=""",""" )

    # Consider rest of the activities
    for j in range(__lowerCAmelCase ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(__lowerCAmelCase , end=""",""" )
            lowerCamelCase__ = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
    UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
50
1
'''simple docstring'''
def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def A__ ( __lowerCAmelCase : int = 5000 ):
    lowerCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , __lowerCAmelCase )]
    for i, pentagonal_i in enumerate(__lowerCAmelCase ):
        for j in range(__lowerCAmelCase , len(__lowerCAmelCase ) ):
            lowerCamelCase__ = pentagonal_nums[j]
            lowerCamelCase__ = pentagonal_i + pentagonal_j
            lowerCamelCase__ = pentagonal_j - pentagonal_i
            if is_pentagonal(__lowerCAmelCase ) and is_pentagonal(__lowerCAmelCase ):
                return b

    return -1


if __name__ == "__main__":
    print(F'{solution() = }')
50
'''simple docstring'''
import warnings

from ..trainer import Trainer
from ..utils import logging


UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,
            _lowerCAmelCase ,
        )
        super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
'''simple docstring''' import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int=[] ): lowerCamelCase__ = size[0] - overlap_pixels * 2 lowerCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels lowerCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 lowerCamelCase__ = np.pad(__lowerCAmelCase , mode="""linear_ramp""" , pad_width=__lowerCAmelCase , end_values=0 ) if "l" in remove_borders: lowerCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: lowerCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: lowerCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: lowerCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : str ): return max(__lowerCAmelCase , min(__lowerCAmelCase , __lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : [int] , __lowerCAmelCase : [int] , __lowerCAmelCase : [int] ): return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def A__ ( __lowerCAmelCase : [int] , __lowerCAmelCase : int , __lowerCAmelCase : [int] ): lowerCamelCase__ = list(__lowerCAmelCase ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap lowerCamelCase__ = clamp_rect(__lowerCAmelCase , [0, 0] , [image_size[0], image_size[1]] ) return rect def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__lowerCAmelCase , (original_slice, 0) ) return result def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) lowerCamelCase__ = tile.crop(__lowerCAmelCase ) return tile def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ): lowerCamelCase__ = n % d return n - divisor class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 3_50 ,): super().__init__( vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,low_res_scheduler=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,max_noise_level=_lowerCAmelCase ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): torch.manual_seed(0 ) lowerCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) ,x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) ,y * tile_size ), min(image.size[0] ,(x + 1) * tile_size ), min(image.size[1] ,(y + 1) * tile_size ), ) lowerCamelCase__ = add_overlap_rect(_lowerCAmelCase ,_lowerCAmelCase ,image.size ) lowerCamelCase__ = image.crop(_lowerCAmelCase ) lowerCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] lowerCamelCase__ = translated_slice_x - (original_image_slice / 2) lowerCamelCase__ = max(0 ,_lowerCAmelCase ) lowerCamelCase__ = squeeze_tile(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = to_input.size lowerCamelCase__ = to_input.resize((tile_size, tile_size) ,Image.BICUBIC ) lowerCamelCase__ = super(_lowerCAmelCase ,self ).__call__(image=_lowerCAmelCase ,**_lowerCAmelCase ).images[0] lowerCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) ,Image.BICUBIC ) lowerCamelCase__ = unsqueeze_tile(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) ,Image.BICUBIC ) lowerCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) lowerCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) ,tile_border * 4 ,remove_borders=_lowerCAmelCase ) ,mode="""L""" ,) final_image.paste( _lowerCAmelCase ,(crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) ,_lowerCAmelCase ) @torch.no_grad() def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 75 ,_lowerCAmelCase = 9.0 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1_28 ,_lowerCAmelCase = 32 ,_lowerCAmelCase = 32 ,): lowerCamelCase__ = Image.new("""RGB""" ,(image.size[0] * 4, image.size[1] * 4) ) lowerCamelCase__ = math.ceil(image.size[0] / tile_size ) lowerCamelCase__ = math.ceil(image.size[1] / tile_size ) lowerCamelCase__ = tcx * tcy lowerCamelCase__ = 0 for y in range(_lowerCAmelCase ): for x in range(_lowerCAmelCase ): self._process_tile( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,prompt=_lowerCAmelCase ,num_inference_steps=_lowerCAmelCase ,guidance_scale=_lowerCAmelCase ,noise_level=_lowerCAmelCase ,negative_prompt=_lowerCAmelCase ,num_images_per_prompt=_lowerCAmelCase ,eta=_lowerCAmelCase ,generator=_lowerCAmelCase ,latents=_lowerCAmelCase ,) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def A__ ( ): # Run a demo lowerCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" lowerCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__lowerCAmelCase , revision="""fp16""" , torch_dtype=torch.floataa ) lowerCamelCase__ = pipe.to("""cuda""" ) lowerCamelCase__ = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(__lowerCAmelCase : str ): print(F'''progress: {obj["progress"]:.4f}''' ) obj["image"].save("""diffusers_library_progress.jpg""" ) lowerCamelCase__ = pipe(image=__lowerCAmelCase , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__lowerCAmelCase ) 
final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
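# A minimal sketch of the blending idea used above: linear-ramp padding builds
# an alpha mask that fades to zero across the overlap band, so adjacent
# upscaled tiles cross-fade instead of showing seams. Sizes here are
# hypothetical and independent of the pipeline.
import numpy as np


def feathered_mask(size: int = 8, overlap: int = 2) -> np.ndarray:
    # Solid 255 core, then a ramp from 255 down to 0 over the overlap band
    core = np.ones((size - 2 * overlap, size - 2 * overlap), dtype=np.uint8) * 255
    return np.pad(core, pad_width=overlap, mode="linear_ramp", end_values=0)


if __name__ == "__main__":
    print(feathered_mask())  # 255 in the core, fading to 0 at the borders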
50
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
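# The hashing helper above keys dataset-builder caches on module source with
# comments stripped, so purely cosmetic comment edits do not invalidate the
# cache. A self-contained sketch of the same fingerprinting idea:
import re as _re
from hashlib import sha256 as _sha256


def source_fingerprint(source: str) -> str:
    lines = [_re.sub(r"#.*", "", line) for line in source.splitlines()]
    return _sha256("\n".join(line for line in lines if line).encode("utf-8")).hexdigest()


# A comment-only line changes nothing:
assert source_fingerprint("x = 1\n# a comment") == source_fingerprint("x = 1")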
50
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = PegasusConfig _UpperCamelCase = {} _UpperCamelCase = 'gelu' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=40 ,_lowerCAmelCase=2 ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = eos_token_id lowerCamelCase__ = pad_token_id lowerCamelCase__ = bos_token_id def UpperCamelCase_ ( self ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) lowerCamelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) lowerCamelCase__ = tf.concat([input_ids, eos_tensor] ,axis=1 ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) lowerCamelCase__ = prepare_pegasus_inputs_dict(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) return config, inputs_dict def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFPegasusModel(config=_lowerCAmelCase ).get_decoder() lowerCamelCase__ = inputs_dict["""input_ids"""] lowerCamelCase__ = input_ids[:1, :] lowerCamelCase__ = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase__ = inputs_dict["""head_mask"""] lowerCamelCase__ = 1 # first forward pass lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,head_mask=_lowerCAmelCase ,use_cache=_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowerCamelCase__ = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next 
input_ids and lowerCamelCase__ = tf.concat([input_ids, next_tokens] ,axis=-1 ) lowerCamelCase__ = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase )[0] lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,past_key_values=_lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice lowerCamelCase__ = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCAmelCase ,_lowerCAmelCase ,rtol=1E-3 ) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str=None , ): if attention_mask is None: lowerCamelCase__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _UpperCamelCase = ( { 'conversational': TFPegasusForConditionalGeneration, 'feature-extraction': TFPegasusModel, 'summarization': TFPegasusForConditionalGeneration, 'text2text-generation': TFPegasusForConditionalGeneration, 'translation': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _UpperCamelCase = True _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFPegasusModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' _UpperCamelCase = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] _UpperCamelCase = [ 'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to' ' reduce the risk of wildfires.', 'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _UpperCamelCase = 'google/pegasus-xsum' @cached_property def UpperCamelCase_ ( self ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase_ ( self ): lowerCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = self.translate_src_text(**_lowerCAmelCase ) assert self.expected_text == generated_words def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = self.tokenizer(self.src_text ,**_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""tf""" ) lowerCamelCase__ = self.model.generate( model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=_lowerCAmelCase ,) lowerCamelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_lowerCAmelCase ) return generated_words @slow def UpperCamelCase_ ( self ): self._assert_generated_batch_equal_expected()
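# Both the model tester above and `prepare_pegasus_inputs_dict` derive the
# attention mask from the pad token. The same rule in plain NumPy (a sketch;
# the tests themselves use tf ops):
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 9, 0, 0], [3, 4, 0, 0, 0]])
attention_mask = (input_ids != pad_token_id).astype(np.int8)
print(attention_mask)  # 1 over real tokens, 0 over padding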
50
'''simple docstring'''
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
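# Usage note: strand_sort pops items out of its input while extracting strands,
# so pass a copy when the caller still needs the original ordering:
if __name__ == "__main__":
    data = [4, 3, 5, 1, 2]
    assert strand_sort(data.copy()) == [1, 2, 3, 4, 5]
    assert data == [4, 3, 5, 1, 2]  # the original list is untouched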
50
1
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# UpperCamelCase : List[str] = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] UpperCamelCase : List[str] = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] UpperCamelCase : List[str] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks UpperCamelCase : str = F'down_blocks.{i}.resnets.{j}.' UpperCamelCase : List[Any] = F'input_blocks.{3*i + j + 1}.0.' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 UpperCamelCase : Optional[int] = F'down_blocks.{i}.attentions.{j}.' UpperCamelCase : List[str] = F'input_blocks.{3*i + j + 1}.1.' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks UpperCamelCase : Union[str, Any] = F'up_blocks.{i}.resnets.{j}.' UpperCamelCase : int = F'output_blocks.{3*i + j}.0.' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 UpperCamelCase : str = F'up_blocks.{i}.attentions.{j}.' UpperCamelCase : Optional[Any] = F'output_blocks.{3*i + j}.1.' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 UpperCamelCase : Optional[Any] = F'down_blocks.{i}.downsamplers.0.conv.' UpperCamelCase : Dict = F'input_blocks.{3*(i+1)}.0.op.' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 UpperCamelCase : List[Any] = F'up_blocks.{i}.upsamplers.0.' UpperCamelCase : Optional[int] = F'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) UpperCamelCase : List[str] = 'mid_block.attentions.0.' UpperCamelCase : Any = 'middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): UpperCamelCase : List[str] = F'mid_block.resnets.{j}.' UpperCamelCase : str = F'middle_block.{2*j}.' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def A__ ( __lowerCAmelCase : List[str] ): # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
lowerCamelCase__ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: lowerCamelCase__ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCamelCase__ = v.replace(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCamelCase__ = v.replace(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = v lowerCamelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# UpperCamelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): UpperCamelCase : List[Any] = F'encoder.down_blocks.{i}.resnets.{j}.' UpperCamelCase : Tuple = F'encoder.down.{i}.block.{j}.' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: UpperCamelCase : Dict = F'down_blocks.{i}.downsamplers.0.' UpperCamelCase : int = F'down.{i}.downsample.' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) UpperCamelCase : int = F'up_blocks.{i}.upsamplers.0.' UpperCamelCase : Tuple = F'up.{3-i}.upsample.' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): UpperCamelCase : str = F'decoder.up_blocks.{i}.resnets.{j}.' UpperCamelCase : int = F'decoder.up.{3-i}.block.{j}.' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): UpperCamelCase : Optional[Any] = F'mid_block.resnets.{i}.' UpperCamelCase : List[str] = F'mid.block_{i+1}.' 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) UpperCamelCase : Any = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def A__ ( __lowerCAmelCase : Dict ): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCamelCase__ = v.replace(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCamelCase__ = v.replace(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = v lowerCamelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCamelCase__ = ["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'''mid.attn_1.{weight_name}.weight''' in k: print(F'''Reshaping {k} for SD format''' ) lowerCamelCase__ = reshape_weight_for_sd(__lowerCAmelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# UpperCamelCase : List[Any] = [ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] UpperCamelCase : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} UpperCamelCase : Tuple = re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp UpperCamelCase : List[Any] = {'q': 0, 'k': 1, 'v': 2} def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = {} lowerCamelCase__ = {} lowerCamelCase__ = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): lowerCamelCase__ = k[: -len(""".q_proj.weight""" )] lowerCamelCase__ = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: lowerCamelCase__ = [None, None, None] lowerCamelCase__ = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): lowerCamelCase__ = k[: -len(""".q_proj.bias""" )] lowerCamelCase__ = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: lowerCamelCase__ = [None, None, None] lowerCamelCase__ = v continue lowerCamelCase__ = textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) lowerCamelCase__ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCamelCase__ = textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) lowerCamelCase__ = torch.cat(__lowerCAmelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCamelCase__ = 
textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(m.group(0 ) )] , __lowerCAmelCase ) lowerCamelCase__ = torch.cat(__lowerCAmelCase ) return new_state_dict def A__ ( __lowerCAmelCase : Optional[Any] ): return text_enc_dict if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) UpperCamelCase : Union[str, Any] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors UpperCamelCase : int = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') UpperCamelCase : Optional[Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') UpperCamelCase : Dict = osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): UpperCamelCase : Optional[Any] = load_file(unet_path, device='cpu') else: UpperCamelCase : Optional[int] = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') UpperCamelCase : List[str] = torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): UpperCamelCase : Any = load_file(vae_path, device='cpu') else: UpperCamelCase : List[Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') UpperCamelCase : List[str] = torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): UpperCamelCase : Tuple = load_file(text_enc_path, device='cpu') else: UpperCamelCase : Optional[Any] = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') UpperCamelCase : Dict = torch.load(text_enc_path, map_location='cpu') # Convert the UNet model UpperCamelCase : Optional[int] = convert_unet_state_dict(unet_state_dict) UpperCamelCase : Union[str, Any] = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model UpperCamelCase : Optional[Any] = convert_vae_state_dict(vae_state_dict) UpperCamelCase : Any = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper UpperCamelCase : Dict = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm UpperCamelCase : int = {'transformer.' + k: v for k, v in text_enc_dict.items()} UpperCamelCase : Optional[Any] = convert_text_enc_state_dict_vaa(text_enc_dict) UpperCamelCase : List[Any] = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: UpperCamelCase : Dict = convert_text_enc_state_dict(text_enc_dict) UpperCamelCase : Dict = {'cond_stage_model.transformer.' 
+ k: v for k, v in text_enc_dict.items()} # Put together new checkpoint UpperCamelCase : List[str] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: UpperCamelCase : Optional[Any] = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: UpperCamelCase : Union[str, Any] = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
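# All three converters above follow the same recipe: start from an identity
# mapping over the checkpoint keys, rewrite each key through a list of
# (new, old) string pairs, then re-key the tensors. A toy version of that
# pattern with hypothetical keys (no torch needed):
def remap_keys(state_dict: dict, rules: list) -> dict:
    mapping = {k: k for k in state_dict}
    for k, v in mapping.items():
        for hf_part, sd_part in rules:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    return {v: state_dict[k] for k, v in mapping.items()}


assert remap_keys(
    {"time_embedding.linear_1.weight": 0},
    [("time_embedding.linear_1", "time_embed.0")],
) == {"time_embed.0.weight": 0}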
50
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and regression target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
1
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
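# The greedy step above assumes the activities arrive sorted by finish time.
# For arbitrary input, sort indices by finish first; a small self-contained
# variant that returns the chosen indices:
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    if not finish:
        return []
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]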
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
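# Every warper exercised above maps (input_ids, scores) to filtered scores and
# leaves input_ids untouched. A minimal NumPy rendition of the top-k behaviour
# the FlaxTopKLogitsWarper test checks (a sketch, not the Flax implementation):
import numpy as np


def top_k_filter(scores: np.ndarray, k: int, filter_value: float = -np.inf) -> np.ndarray:
    kth_best = np.sort(scores, axis=-1)[..., -k][..., None]  # k-th highest per row
    return np.where(scores < kth_best, filter_value, scores)


print(top_k_filter(np.array([[0.1, 0.4, 0.2, 0.3]]), k=2))  # keeps 0.4 and 0.3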
50
1
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
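# Scraping caveat: the CSS class above is whatever Yahoo currently renders and
# can change without notice. A slightly more defensive variant (hypothetical
# request headers; reuses the imports above) that fails loudly instead of
# raising a bare AttributeError:
def safe_stock_price(symbol: str = "AAPL") -> str:
    response = requests.get(
        f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}",
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    response.raise_for_status()
    div = BeautifulSoup(response.text, "html.parser").find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if div is None or div.find("span") is None:
        raise ValueError(f"price markup not found for {symbol}")
    return div.find("span").text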
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
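# The _LazyModule pattern used by these __init__ files defers the heavy
# torch/tf imports until an exported name is first touched. A stripped-down
# sketch of the mechanism (illustrative only, not the transformers
# implementation):
import importlib


class LazyModule:
    def __init__(self, import_structure: dict):
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name: str):
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)


lazy = LazyModule({"json": ["dumps", "loads"]})
print(lazy.dumps({"lazy": True}))  # the json module is imported only here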
50
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    # XOR of two integers is negative exactly when their sign bits differ
    return num_a ^ num_b < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
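# Why this works: in two's complement the sign lives in the most significant
# bit, and XOR sets that bit exactly when the two sign bits differ:
#   -3 ^ 2 -> ...11111101 ^ ...00000010 = ...11111111  (< 0, signs differ)
#    3 ^ 2 -> ...00000011 ^ ...00000010 = ...00000001  (>= 0, same sign)
assert different_signs(-3, 2) and not different_signs(3, 2)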
50
1
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Union[str, Any] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring'''
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the progress bar only on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
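# Usage sketch: the first positional argument is main_process_only, so the
# iterable must come after it (shown as comments only, since running this
# requires the accelerate package context):
#
#     for batch in tqdm(True, dataloader, desc="train"):   # bar on local rank 0 only
#         ...
#     for batch in tqdm(False, dataloader, desc="debug"):  # bar on every process
#         ...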
50
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow's C++ logging

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
50
1
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): # pass variant but use the non-variant filenames lowerCamelCase__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowerCamelCase__ = """fp16""" 
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): # pass variant but use the non-variant filenames lowerCamelCase__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertTrue(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] lowerCamelCase__ = """fp16""" self.assertFalse(is_safetensors_compatible(_lowerCAmelCase ,variant=_lowerCAmelCase ) )
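# The rule these cases pin down: a checkpoint layout is safetensors-compatible
# when every torch ``.bin`` weight has a safetensors counterpart, with the
# variant suffix honoured. A condensed sketch of that predicate (my reading of
# the tests above, not the diffusers source):
def safetensors_compatible(filenames: list, variant: str | None = None) -> bool:
    safes = {f for f in filenames if f.endswith(".safetensors")}
    for weight in (f for f in filenames if f.endswith(".bin")):
        stem = weight[: -len(".bin")]
        candidates = {stem + ".safetensors"}
        if variant:
            # a non-variant safetensors file also counts as a counterpart
            candidates.add(stem.replace(f".{variant}", "") + ".safetensors")
        if not candidates & safes:
            return False
    return True


assert safetensors_compatible(["unet/model.bin", "unet/model.safetensors"])
assert not safetensors_compatible(["unet/model.fp16.bin"], variant="fp16")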
50
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
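# The attribute_map above is what lets framework-agnostic code read
# config.hidden_size even though this config stores n_embd; PretrainedConfig
# resolves the alias on attribute access. A usage sketch (comments only, since
# it assumes the transformers package context):
#
#     config = GPTBigCodeConfig(n_embd=768, n_head=12)
#     assert config.hidden_size == 768         # read through the alias
#     assert config.num_attention_heads == 12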
50
1
'''simple docstring'''

import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


UpperCamelCase : Any = 16
UpperCamelCase : Any = 32


def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ):
    lowerCamelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    lowerCamelCase__ = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(__lowerCAmelCase : Union[str, Any] ):
        # max_length=None => use the model max length (it's actually the default)
        lowerCamelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        lowerCamelCase__ = datasets.map(
            __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowerCamelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(__lowerCAmelCase : Dict ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        lowerCamelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            lowerCamelCase__ = 16
        elif accelerator.mixed_precision != "no":
            lowerCamelCase__ = 8
        else:
            lowerCamelCase__ = None

        return tokenizer.pad(
            __lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    lowerCamelCase__ = DataLoader(
        tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
    lowerCamelCase__ = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    UpperCamelCase : int = mocked_dataloaders  # noqa: F811


def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1":
        lowerCamelCase__ = 2
    # Initialize accelerator
    lowerCamelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowerCamelCase__ = config["""lr"""]
    lowerCamelCase__ = int(config["""num_epochs"""] )
    lowerCamelCase__ = int(config["""seed"""] )
    lowerCamelCase__ = int(config["""batch_size"""] )
    lowerCamelCase__ = evaluate.load("""glue""" , """mrpc""" )

    # If the batch size is too big we use gradient accumulation
    lowerCamelCase__ = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        lowerCamelCase__ = batch_size // MAX_GPU_BATCH_SIZE
        lowerCamelCase__ = MAX_GPU_BATCH_SIZE

    set_seed(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    lowerCamelCase__ = model.to(accelerator.device )

    # Instantiate optimizer
    lowerCamelCase__ = AdamW(params=model.parameters() , lr=__lowerCAmelCase )

    # Instantiate scheduler
    lowerCamelCase__ = get_linear_schedule_with_warmup(
        optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    # Now we train the model
    for epoch in range(__lowerCAmelCase ):
        model.train()
        for step, batch in enumerate(__lowerCAmelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            lowerCamelCase__ = model(**__lowerCAmelCase )
            lowerCamelCase__ = outputs.loss
            lowerCamelCase__ = loss / gradient_accumulation_steps
            accelerator.backward(__lowerCAmelCase )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        lowerCamelCase__ = 0
        for step, batch in enumerate(__lowerCAmelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                lowerCamelCase__ = model(**__lowerCAmelCase )
            lowerCamelCase__ = outputs.logits.argmax(dim=-1 )
            lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((predictions, batch["""labels"""]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(__lowerCAmelCase ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    lowerCamelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    lowerCamelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=__lowerCAmelCase , references=__lowerCAmelCase , )

        lowerCamelCase__ = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" ,
        type=__lowerCAmelCase ,
        default=__lowerCAmelCase ,
        choices=["""no""", """fp16""", """bf16""", """fp8"""] ,
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" ,
    )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    lowerCamelCase__ = parser.parse_args()
    lowerCamelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(__lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    main()
50
'''simple docstring'''

from PIL import Image


def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ):
    def brightness(__lowerCAmelCase : int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(__lowerCAmelCase )


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00)
        brigt_img.save('image_data/lena_brightness.png', format='png')
50
1
'''simple docstring'''

import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = tempfile.mkdtemp()
        lowerCamelCase__ = SamImageProcessor()
        lowerCamelCase__ = SamProcessor(_lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor

    def UpperCamelCase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
        lowerCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )

        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop original_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )

    @require_torch
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = [torch.ones((1, 3, 5, 5) )]
        lowerCamelCase__ = [[17_64, 26_46]]
        lowerCamelCase__ = [[6_83, 10_24]]
        lowerCamelCase__ = processor.post_process_masks(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        lowerCamelCase__ = processor.post_process_masks(
            _lowerCAmelCase ,torch.tensor(_lowerCAmelCase ) ,torch.tensor(_lowerCAmelCase ) )
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        # should also work with np
        lowerCamelCase__ = [np.ones((1, 3, 5, 5) )]
        lowerCamelCase__ = processor.post_process_masks(_lowerCAmelCase ,np.array(_lowerCAmelCase ) ,np.array(_lowerCAmelCase ) )
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        lowerCamelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(_lowerCAmelCase ):
            lowerCamelCase__ = processor.post_process_masks(_lowerCAmelCase ,np.array(_lowerCAmelCase ) ,np.array(_lowerCAmelCase ) )


@require_vision
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = tempfile.mkdtemp()
        lowerCamelCase__ = SamImageProcessor()
        lowerCamelCase__ = SamProcessor(_lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor

    def UpperCamelCase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
        lowerCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )

        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )

    @require_tf
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = [tf.ones((1, 3, 5, 5) )]
        lowerCamelCase__ = [[17_64, 26_46]]
        lowerCamelCase__ = [[6_83, 10_24]]
        lowerCamelCase__ = processor.post_process_masks(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""tf""" )
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        lowerCamelCase__ = processor.post_process_masks(
            _lowerCAmelCase ,tf.convert_to_tensor(_lowerCAmelCase ) ,tf.convert_to_tensor(_lowerCAmelCase ) ,return_tensors="""tf""" ,)
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        # should also work with np
        lowerCamelCase__ = [np.ones((1, 3, 5, 5) )]
        lowerCamelCase__ = processor.post_process_masks(
            _lowerCAmelCase ,np.array(_lowerCAmelCase ) ,np.array(_lowerCAmelCase ) ,return_tensors="""tf""" )
        self.assertEqual(masks[0].shape ,(1, 3, 17_64, 26_46) )

        lowerCamelCase__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            lowerCamelCase__ = processor.post_process_masks(
                _lowerCAmelCase ,np.array(_lowerCAmelCase ) ,np.array(_lowerCAmelCase ) ,return_tensors="""tf""" )


@require_vision
@require_torchvision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = tempfile.mkdtemp()
        lowerCamelCase__ = SamImageProcessor()
        lowerCamelCase__ = SamProcessor(_lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor

    def UpperCamelCase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
        lowerCamelCase__ = [tf.convert_to_tensor(_lowerCAmelCase )]
        lowerCamelCase__ = [torch.tensor(_lowerCAmelCase )]
        lowerCamelCase__ = [[17_64, 26_46]]
        lowerCamelCase__ = [[6_83, 10_24]]
        lowerCamelCase__ = processor.post_process_masks(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""tf""" )
        lowerCamelCase__ = processor.post_process_masks(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = SamProcessor(image_processor=_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""pt""" )["""pixel_values"""].numpy()
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""pt""" )["""pixel_values"""].numpy()
        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""tf""" )["""pixel_values"""].numpy()
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ) )
        self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ) )
        self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ) )
50
'''simple docstring'''

def A__ ( ):
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


UpperCamelCase : Dict = generate_large_matrix()
UpperCamelCase : Any = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def A__ ( __lowerCAmelCase : list[list[int]] ):
    assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
    assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )


def A__ ( __lowerCAmelCase : list[int] ):
    lowerCamelCase__ = 0
    lowerCamelCase__ = len(__lowerCAmelCase ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        lowerCamelCase__ = (left + right) // 2
        lowerCamelCase__ = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            lowerCamelCase__ = mid + 1
        else:
            lowerCamelCase__ = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(__lowerCAmelCase )


def A__ ( __lowerCAmelCase : list[list[int]] ):
    lowerCamelCase__ = 0
    lowerCamelCase__ = len(grid[0] )

    for i in range(len(__lowerCAmelCase ) ):
        lowerCamelCase__ = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(__lowerCAmelCase ) * len(grid[0] )) - total


def A__ ( __lowerCAmelCase : list[list[int]] ):
    return len([number for row in grid for number in row if number < 0] )


def A__ ( __lowerCAmelCase : list[list[int]] ):
    lowerCamelCase__ = 0
    for row in grid:
        for i, number in enumerate(__lowerCAmelCase ):
            if number < 0:
                total += len(__lowerCAmelCase ) - i
                break
    return total


def A__ ( ):
    from timeit import timeit

    print("""Running benchmarks""" )
    lowerCamelCase__ = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
50
1
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : Union[str, Any] = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'gpt_bigcode'
    _UpperCamelCase = ['past_key_values']
    _UpperCamelCase = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = n_positions
        lowerCamelCase__ = n_embd
        lowerCamelCase__ = n_layer
        lowerCamelCase__ = n_head
        lowerCamelCase__ = n_inner
        lowerCamelCase__ = activation_function
        lowerCamelCase__ = resid_pdrop
        lowerCamelCase__ = embd_pdrop
        lowerCamelCase__ = attn_pdrop
        lowerCamelCase__ = layer_norm_epsilon
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = scale_attn_weights
        lowerCamelCase__ = use_cache
        lowerCamelCase__ = attention_softmax_in_fpaa
        lowerCamelCase__ = scale_attention_softmax_in_fpaa
        lowerCamelCase__ = multi_query
        lowerCamelCase__ = bos_token_id
        lowerCamelCase__ = eos_token_id

        super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
50
'''simple docstring'''

import argparse
import os
import re

import packaging.version


UpperCamelCase : List[Any] = 'examples/'
UpperCamelCase : int = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase : Any = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
UpperCamelCase : Any = 'README.md'


def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
    with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase__ = f.read()
    lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
    lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase )
    lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
    with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.write(__lowerCAmelCase )


def A__ ( __lowerCAmelCase : str ):
    for folder, directories, fnames in os.walk(__lowerCAmelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    if not patch:
        update_version_in_examples(__lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
    lowerCamelCase__ = """1. Want to contribute a new model?"""
    with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase__ = f.readlines()
    # Find the start of the list.
    lowerCamelCase__ = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    lowerCamelCase__ = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lowerCamelCase__ = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" ,
                """https://huggingface.co/docs/transformers/model_doc""" ,
            )
        index += 1
    with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(__lowerCAmelCase )


def A__ ( ):
    with open(REPLACE_FILES["""init"""] , """r""" ) as f:
        lowerCamelCase__ = f.read()
    lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
    return packaging.version.parse(__lowerCAmelCase )


def A__ ( __lowerCAmelCase : Union[str, Any]=False ):
    lowerCamelCase__ = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        lowerCamelCase__ = default_version.base_version
    elif patch:
        lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(__lowerCAmelCase ) == 0:
        lowerCamelCase__ = default_version

    print(F'''Updating version to {version}.''' )
    global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
        clean_main_ref_in_model_list()


def A__ ( ):
    lowerCamelCase__ = get_version()
    lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    lowerCamelCase__ = current_version.base_version

    # Check with the user we got that right.
    lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(__lowerCAmelCase ) == 0:
        lowerCamelCase__ = dev_version

    print(F'''Updating version to {version}.''' )
    global_version_update(__lowerCAmelCase )
    print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    UpperCamelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    UpperCamelCase : Any = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
50
1
'''simple docstring'''

import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=False ,):
        lowerCamelCase__ = size if size is not None else {"""height""": 20, """width""": 20}
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean
        lowerCamelCase__ = image_std
        lowerCamelCase__ = do_reduce_labels

    def UpperCamelCase_ ( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def A__ ( ):
    lowerCamelCase__ = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    lowerCamelCase__ = Image.open(dataset[0]["""file"""] )
    lowerCamelCase__ = Image.open(dataset[1]["""file"""] )
    return image, map


def A__ ( ):
    lowerCamelCase__ = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    lowerCamelCase__ = Image.open(ds[0]["""file"""] )
    lowerCamelCase__ = Image.open(ds[1]["""file"""] )
    lowerCamelCase__ = Image.open(ds[2]["""file"""] )
    lowerCamelCase__ = Image.open(ds[3]["""file"""] )
    return [imagea, imagea], [mapa, mapa]


@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = BeitImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = BeitImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 20, """width""": 20} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        self.assertEqual(image_processor.do_reduce_labels ,_lowerCAmelCase )

        lowerCamelCase__ = self.image_processing_class.from_dict(
            self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=_lowerCAmelCase )
        self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
        self.assertEqual(image_processor.do_reduce_labels ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        lowerCamelCase__ = []
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,maps[0] ,return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(
            encoding["""labels"""].shape ,
            (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(encoding["""labels"""].dtype ,torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(
            encoding["""labels"""].shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(encoding["""labels"""].dtype ,torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )

        # Test not batched input (PIL images)
        lowerCamelCase__ , lowerCamelCase__ = prepare_semantic_single_inputs()
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(
            encoding["""labels"""].shape ,
            (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(encoding["""labels"""].dtype ,torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )

        # Test batched input (PIL images)
        lowerCamelCase__ , lowerCamelCase__ = prepare_semantic_batch_inputs()
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape ,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(
            encoding["""labels"""].shape ,
            (
                2,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        self.assertEqual(encoding["""labels"""].dtype ,torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        lowerCamelCase__ , lowerCamelCase__ = prepare_semantic_single_inputs()
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 1_50 )

        lowerCamelCase__ = True
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
50
'''simple docstring'''

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


UpperCamelCase : List[str] = logging.get_logger(__name__)

UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

UpperCamelCase : int = {
    'vocab_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
        ),
        'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli': (
            'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
        ),
    },
}

UpperCamelCase : Tuple = {
    'squeezebert/squeezebert-uncased': 5_12,
    'squeezebert/squeezebert-mnli': 5_12,
    'squeezebert/squeezebert-mnli-headless': 5_12,
}

UpperCamelCase : Dict = {
    'squeezebert/squeezebert-uncased': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = SqueezeBertTokenizer

    def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
        super().__init__(
            _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)

        lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
        ):
            lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
            lowerCamelCase__ = do_lower_case
            lowerCamelCase__ = strip_accents
            lowerCamelCase__ = tokenize_chinese_chars
            lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )

        lowerCamelCase__ = do_lower_case

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
        lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
        return tuple(_lowerCAmelCase )
50
1
'''simple docstring'''

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def A__ ( __lowerCAmelCase : Any ):
    if isinstance(__lowerCAmelCase , collections.abc.Iterable ):
        return x
    return (x, x)


@require_flax
class UpperCamelCase__ :
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        pass

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = np.abs((a - b) ).max()
        self.assertLessEqual(_lowerCAmelCase ,_lowerCAmelCase ,F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        lowerCamelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )
        lowerCamelCase__ = model(input_ids=_lowerCAmelCase ,pixel_values=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )

        self.assertEqual(output["""text_embeds"""].shape ,(input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape ,(pixel_values.shape[0], config.projection_dim) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        lowerCamelCase__ , lowerCamelCase__ = self.get_vision_text_model(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
        lowerCamelCase__ = model(input_ids=_lowerCAmelCase ,pixel_values=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )

        self.assertEqual(output["""text_embeds"""].shape ,(input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape ,(pixel_values.shape[0], model.config.projection_dim) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        lowerCamelCase__ , lowerCamelCase__ = self.get_vision_text_model(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
        lowerCamelCase__ = model(input_ids=_lowerCAmelCase ,pixel_values=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )
        lowerCamelCase__ = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )

            lowerCamelCase__ = model(input_ids=_lowerCAmelCase ,pixel_values=_lowerCAmelCase ,attention_mask=_lowerCAmelCase )
            lowerCamelCase__ = after_output[0]
            lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase ,1E-3 )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        lowerCamelCase__ , lowerCamelCase__ = self.get_vision_text_model(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = {"""vision_model""": vision_model, """text_model""": text_model}
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
        lowerCamelCase__ = model(
            input_ids=_lowerCAmelCase ,pixel_values=_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,output_attentions=_lowerCAmelCase )

        lowerCamelCase__ = output.vision_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) ,vision_config.num_hidden_layers )

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ = to_atuple(vision_model.config.image_size )
        lowerCamelCase__ = to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )

        lowerCamelCase__ = output.text_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) ,text_config.num_hidden_layers )

        self.assertEqual(
            text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        pt_model.to(_lowerCAmelCase )
        pt_model.eval()

        # prepare inputs
        lowerCamelCase__ = inputs_dict
        lowerCamelCase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}

        with torch.no_grad():
            lowerCamelCase__ = pt_model(**_lowerCAmelCase ).to_tuple()

        lowerCamelCase__ = fx_model(**_lowerCAmelCase ).to_tuple()
        self.assertEqual(len(_lowerCAmelCase ) ,len(_lowerCAmelCase ) ,"""Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
            self.assert_almost_equals(_lowerCAmelCase ,pt_output.numpy() ,4E-2 )

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ,from_pt=_lowerCAmelCase )

        lowerCamelCase__ = fx_model_loaded(**_lowerCAmelCase ).to_tuple()
        self.assertEqual(len(_lowerCAmelCase ) ,len(_lowerCAmelCase ) ,"""Output lengths differ between Flax and PyTorch""" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
            self.assert_almost_equals(_lowerCAmelCase ,pt_output.numpy() ,4E-2 )

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = VisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ,from_flax=_lowerCAmelCase )

        pt_model_loaded.to(_lowerCAmelCase )
        pt_model_loaded.eval()

        with torch.no_grad():
            lowerCamelCase__ = pt_model_loaded(**_lowerCAmelCase ).to_tuple()

        self.assertEqual(len(_lowerCAmelCase ) ,len(_lowerCAmelCase ) ,"""Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
            self.assert_almost_equals(_lowerCAmelCase ,pt_output_loaded.numpy() ,4E-2 )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = VisionTextDualEncoderModel(_lowerCAmelCase )
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )

        lowerCamelCase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,_lowerCAmelCase )
        lowerCamelCase__ = fx_state

        self.check_pt_flax_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = VisionTextDualEncoderModel(_lowerCAmelCase )
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel(_lowerCAmelCase )

        lowerCamelCase__ = load_flax_weights_in_pytorch_model(_lowerCAmelCase ,fx_model.params )

        self.check_pt_flax_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        self.check_save_load(**_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_lowerCAmelCase )

    @is_pt_flax_cross_test
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        lowerCamelCase__ = config_inputs_dict.pop("""vision_config""" )
        lowerCamelCase__ = config_inputs_dict.pop("""text_config""" )
        lowerCamelCase__ = config_inputs_dict

        self.check_equivalence_pt_to_flax(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        self.check_equivalence_flax_to_pt(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )

    @slow
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ , lowerCamelCase__ = self.get_pretrained_model_and_inputs()

        lowerCamelCase__ = model_a(**_lowerCAmelCase )
        lowerCamelCase__ = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )

            lowerCamelCase__ = model_a(**_lowerCAmelCase )
            lowerCamelCase__ = after_outputs[0]
            lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase ,1E-5 )


@require_flax
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" ,"""hf-internal-testing/tiny-bert""" ,vision_from_pt=_lowerCAmelCase ,text_from_pt=_lowerCAmelCase ,)
        lowerCamelCase__ = 13
        lowerCamelCase__ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        lowerCamelCase__ = random_attention_mask([batch_size, 4] )
        lowerCamelCase__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}

        return model, inputs

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = FlaxViTModel(_lowerCAmelCase )
        lowerCamelCase__ = FlaxBertModel(_lowerCAmelCase )
        return vision_model, text_model

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = FlaxViTModelTester(self )
        lowerCamelCase__ = FlaxBertModelTester(self )
        lowerCamelCase__ = vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ = bert_model_tester.prepare_config_and_inputs()

        lowerCamelCase__ , lowerCamelCase__ = vision_config_and_inputs

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" ,"""hf-internal-testing/tiny-bert""" ,vision_from_pt=_lowerCAmelCase ,text_from_pt=_lowerCAmelCase ,)
        lowerCamelCase__ = 13
        lowerCamelCase__ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        lowerCamelCase__ = random_attention_mask([batch_size, 4] )
        lowerCamelCase__ = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}

        return model, inputs

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = FlaxCLIPVisionModel(_lowerCAmelCase )
        lowerCamelCase__ = FlaxBertModel(_lowerCAmelCase )
        return vision_model, text_model

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ = FlaxBertModelTester(self )
        lowerCamelCase__ = clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ = bert_model_tester.prepare_config_and_inputs()

        lowerCamelCase__ , lowerCamelCase__ = vision_config_and_inputs

        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    @slow
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" ,logit_scale_init_value=1.0 )
        lowerCamelCase__ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )

        lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )

        lowerCamelCase__ = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] ,images=_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""np""" )

        lowerCamelCase__ = model(**_lowerCAmelCase )

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)

        lowerCamelCase__ = np.array([[1.228_4727, 0.310_4122]] )

        self.assertTrue(np.allclose(outputs.logits_per_image ,_lowerCAmelCase ,atol=1E-3 ) )
50
'''simple docstring'''

import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def A__ ( __lowerCAmelCase : Any ):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
        or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f)  #
        or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f)  #
        or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f)  #
        or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f)  #
        or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f)  #
        or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
        or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f)  #
    ):  #
        return True

    return False


def A__ ( __lowerCAmelCase : str ):
    # word like '180' or '身高' or '神'
    for char in word:
        lowerCamelCase__ = ord(__lowerCAmelCase )
        if not _is_chinese_char(__lowerCAmelCase ):
            return 0
    return 1


def A__ ( __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = set()

    for token in tokens:
        lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
        if chinese_word:
            word_set.add(__lowerCAmelCase )
    lowerCamelCase__ = list(__lowerCAmelCase )
    return word_list


def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ):
    if not chinese_word_set:
        return bert_tokens
    lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )

    lowerCamelCase__ = bert_tokens
    lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
    while start < end:
        lowerCamelCase__ = True
        if is_chinese(bert_word[start] ):
            lowerCamelCase__ = min(end - start , __lowerCAmelCase )
            for i in range(__lowerCAmelCase , 1 , -1 ):
                lowerCamelCase__ = """""".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        lowerCamelCase__ = """##""" + bert_word[j]
                    lowerCamelCase__ = start + i
                    lowerCamelCase__ = False
                    break
        if single_word:
            start += 1
    return bert_word


def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
    lowerCamelCase__ = []

    for i in range(0 , len(__lowerCAmelCase ) , 100 ):
        lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
        lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
        ltp_res.extend(__lowerCAmelCase )
    assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )

    lowerCamelCase__ = []
    for i in range(0 , len(__lowerCAmelCase ) , 100 ):
        lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
        bert_res.extend(res["""input_ids"""] )
    assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )

    lowerCamelCase__ = []
    for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
        lowerCamelCase__ = []
        for id in input_ids:
            lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
            input_tokens.append(__lowerCAmelCase )
        lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(__lowerCAmelCase ):
            if token[:2] == "##":
                lowerCamelCase__ = token[2:]
                # save chinese tokens' pos
                if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
                    ref_id.append(__lowerCAmelCase )
        ref_ids.append(__lowerCAmelCase )

    assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )

    return ref_ids


def A__ ( __lowerCAmelCase : Optional[int] ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        lowerCamelCase__ = f.readlines()
    lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    lowerCamelCase__ = LTP(args.ltp )  # faster in GPU device
    lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )

    lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
        f.writelines(__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        required=False,
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp',
        required=False,
        type=str,
        default='./resources/ltp',
        help='resources for LTP tokenizer, usually a path',
    )
    parser.add_argument(
        '--bert',
        required=False,
        type=str,
        default='./resources/robert',
        help='resources for Bert tokenizer',
    )
    parser.add_argument(
        '--save_path',
        required=False,
        type=str,
        default='./resources/ref.txt',
        help='path to save res',
    )
    UpperCamelCase : Any = parser.parse_args()
    main(args)
50
1
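Editor's note: the reference-preparation script in this row leans on a CJK Unicode-block test that is hard to read once flattened. A minimal runnable sketch of the same check follows; it covers only the main Basic Multilingual Plane ranges (the supplementary-plane ranges such as 0x20000-0x2A6DF are omitted for brevity), so treat it as an illustration rather than the script's exact predicate.

def is_cjk_codepoint(cp: int) -> bool:
    # Core CJK ideograph ranges in the Basic Multilingual Plane.
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or (0xF900 <= cp <= 0xFAFF)

def is_chinese_word(word: str) -> bool:
    # A token counts as Chinese only if every character is a CJK ideograph,
    # mirroring the word-level loop in the script above.
    return all(is_cjk_codepoint(ord(char)) for char in word)

assert is_chinese_word("身高")
assert not is_chinese_word("180")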
'''simple docstring''' from __future__ import annotations from collections.abc import Generator def A__ ( ): lowerCamelCase__ = {} lowerCamelCase__ = 2 while True: lowerCamelCase__ = factor_map.pop(__lowerCAmelCase , __lowerCAmelCase ) if factor: lowerCamelCase__ = factor + prime while x in factor_map: x += factor lowerCamelCase__ = factor else: lowerCamelCase__ = prime yield prime prime += 1 def A__ ( __lowerCAmelCase : float = 1e10 ): lowerCamelCase__ = sieve() lowerCamelCase__ = 1 while True: lowerCamelCase__ = next(__lowerCAmelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the remainder will be 2. next(__lowerCAmelCase ) n += 2 if __name__ == "__main__": print(solution())
50
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Tuple = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = [256, 512, 768, 768] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = (1, 384, 384) lowerCamelCase__ = False lowerCamelCase__ = """project""" if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 768 lowerCamelCase__ = [1, 1, 1, 0.5] lowerCamelCase__ = 150 lowerCamelCase__ = 16 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : List[Any] ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" 
) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase__ = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase__ = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase__ = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth if show_prediction: lowerCamelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: 
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
50
1
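Editor's note: the generator in this row is an incremental (unbounded) prime sieve, but the renamed variables obscure the bookkeeping. Below is a de-obfuscated sketch of the same technique; the descriptive names are chosen here for clarity and are not the row's originals.

from collections.abc import Generator

def sieve() -> Generator[int, None, None]:
    # factor_map sends each upcoming composite to one of its prime factors,
    # so a dict lookup replaces trial division.
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            # Composite: reschedule this factor at its next free multiple.
            multiple = candidate + factor
            while multiple in factor_map:
                multiple += factor
            factor_map[multiple] = factor
        else:
            # Prime: its first multiple worth tracking is its square.
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

primes = sieve()
assert [next(primes) for _ in range(5)] == [2, 3, 5, 7, 11]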
'''simple docstring''' def A__ ( __lowerCAmelCase : int ): if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : str = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
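Editor's note: the bit trick in this row works because Python's & binds tighter than ==, so number & (number - 1) == 0 already groups as (number & (number - 1)) == 0; note that, as written in the row, 0 is also reported as a power of two. A sketch that makes the edge case explicit (the behavioral change is flagged in the comment):

def is_power_of_two(number: int) -> bool:
    # A power of two has exactly one set bit, so clearing the lowest set bit
    # must leave zero. Unlike the row above, 0 is excluded explicitly here,
    # since 0 & -1 == 0 would otherwise report it as a power of two.
    if number < 0:
        raise ValueError("number must not be negative")
    return number != 0 and number & (number - 1) == 0

assert is_power_of_two(1) and is_power_of_two(1024)
assert not any(is_power_of_two(n) for n in (0, 3, 12))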
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase : int = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCamelCase = field( default='NER' ,metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCamelCase = field(default=a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCamelCase = field( default=a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = field( metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} ,) _UpperCamelCase = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) _UpperCamelCase = field( default=a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def A__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' """ --overwrite_output_dir to overcome.""" ) lowerCamelCase__ = import_module("""tasks""" ) try: lowerCamelCase__ = getattr(__lowerCAmelCase , model_args.task_type ) lowerCamelCase__ = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowerCamelCase__ = token_classification_task.get_labels(data_args.labels ) lowerCamelCase__ = dict(enumerate(__lowerCAmelCase ) ) lowerCamelCase__ = len(__lowerCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid={label: i for i, label in enumerate(__lowerCAmelCase )} , cache_dir=model_args.cache_dir , ) lowerCamelCase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowerCamelCase__ = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets lowerCamelCase__ = ( TokenClassificationDataset( token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCamelCase__ = ( TokenClassificationDataset( token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ) -> Tuple[List[int], List[int]]: lowerCamelCase__ = np.argmax(__lowerCAmelCase , axis=2 ) lowerCamelCase__ , lowerCamelCase__ = preds.shape lowerCamelCase__ = [[] for _ in range(__lowerCAmelCase )] lowerCamelCase__ = [[] for _ in range(__lowerCAmelCase )] for i in range(__lowerCAmelCase ): for j in range(__lowerCAmelCase ): if 
label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__lowerCAmelCase : EvalPrediction ) -> Dict: lowerCamelCase__ , lowerCamelCase__ = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(__lowerCAmelCase , __lowerCAmelCase ), "precision": precision_score(__lowerCAmelCase , __lowerCAmelCase ), "recall": recall_score(__lowerCAmelCase , __lowerCAmelCase ), "f1": fa_score(__lowerCAmelCase , __lowerCAmelCase ), } # Data collator lowerCamelCase__ = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCamelCase__ = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCamelCase__ = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCamelCase__ = trainer.evaluate() lowerCamelCase__ = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , __lowerCAmelCase , __lowerCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) results.update(__lowerCAmelCase ) # Predict if training_args.do_predict: lowerCamelCase__ = TokenClassificationDataset( token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = trainer.predict(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = align_predictions(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = os.path.join(training_args.output_dir , """test_results.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , """w""" ) as writer: for key, value in metrics.items(): logger.info(""" %s = %s""" , __lowerCAmelCase , __lowerCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) # Save predictions lowerCamelCase__ = os.path.join(training_args.output_dir , """test_predictions.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , """w""" ) as writer: with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f: token_classification_task.write_predictions_to_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return results def A__ ( __lowerCAmelCase : Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
50
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Dict = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'codegen' _UpperCamelCase = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size lowerCamelCase__ = n_ctx lowerCamelCase__ = n_positions lowerCamelCase__ = n_embd lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = n_inner lowerCamelCase__ = rotary_dim lowerCamelCase__ = activation_function lowerCamelCase__ = resid_pdrop lowerCamelCase__ = embd_pdrop lowerCamelCase__ = attn_pdrop lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id super().__init__( bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? 
lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
50
1
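Editor's note: the NER script in this row aligns argmax predictions with gold labels while skipping positions whose label equals nn.CrossEntropyLoss().ignore_index. Here is a self-contained numpy sketch of that alignment step, assuming the usual ignore index of -100; the tiny label map and logits are illustrative.

import numpy as np

def align_predictions(predictions, label_ids, label_map, ignore_index=-100):
    # predictions: (batch, seq_len, num_labels) logits; label_ids: (batch, seq_len).
    preds = np.argmax(predictions, axis=2)
    preds_list, labels_list = [], []
    for pred_row, label_row in zip(preds, label_ids):
        kept_preds, kept_labels = [], []
        for p, l in zip(pred_row, label_row):
            if l != ignore_index:  # skip padding / special-token positions
                kept_preds.append(label_map[int(p)])
                kept_labels.append(label_map[int(l)])
        preds_list.append(kept_preds)
        labels_list.append(kept_labels)
    return preds_list, labels_list

label_map = {0: "O", 1: "B-PER"}
logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])
labels = np.array([[0, 1, -100]])
assert align_predictions(logits, labels, label_map) == ([["O", "B-PER"]], [["O", "B-PER"]])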
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
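Editor's note: the Flax tests in this row exercise temperature, top-k and top-p warping. The core of top-k warping (keep the k largest logits per row and push the rest to -inf so softmax assigns those tokens zero probability) fits in a few lines of numpy; this is an illustrative rewrite, not the FlaxTopKLogitsWarper implementation.

import numpy as np

def top_k_warp(scores, top_k, filter_value=-np.inf):
    # The k-th largest value per row becomes the admission threshold.
    kth_largest = np.sort(scores, axis=-1)[:, -top_k][:, None]
    return np.where(scores < kth_largest, filter_value, scores)

scores = np.array([[0.0, 1.0, 2.0, 3.0]])
warped = top_k_warp(scores, top_k=2)
assert np.isinf(warped[0, :2]).all() and (warped[0, 2:] == [2.0, 3.0]).all()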
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase : str = logging.get_logger('transformers.models.speecht5') def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): hf_model.apply_weight_norm() lowerCamelCase__ = checkpoint["""input_conv.weight_g"""] lowerCamelCase__ = checkpoint["""input_conv.weight_v"""] lowerCamelCase__ = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): lowerCamelCase__ = checkpoint[F'''upsamples.{i}.1.weight_g'''] lowerCamelCase__ = checkpoint[F'''upsamples.{i}.1.weight_v'''] lowerCamelCase__ = checkpoint[F'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g'''] lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v'''] lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias'''] lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g'''] lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v'''] lowerCamelCase__ = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias'''] lowerCamelCase__ = checkpoint["""output_conv.1.weight_g"""] lowerCamelCase__ = checkpoint["""output_conv.1.weight_v"""] lowerCamelCase__ = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Dict=None , ): if config_path is not None: lowerCamelCase__ = SpeechTaHifiGanConfig.from_pretrained(__lowerCAmelCase ) else: lowerCamelCase__ = SpeechTaHifiGanConfig() lowerCamelCase__ = SpeechTaHifiGan(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = np.load(__lowerCAmelCase ) lowerCamelCase__ = stats[0].reshape(-1 ) lowerCamelCase__ = stats[1].reshape(-1 ) lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ).float() lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ).float() model.save_pretrained(__lowerCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCamelCase : Optional[Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
50
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowerCAmelCase , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) lowerCamelCase__ = v.half() if save_path is None: # overwrite src_path lowerCamelCase__ = src_path torch.save(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
50
1
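Editor's note: the HiFi-GAN converter in this row applies weight normalization before copying weight_g/weight_v tensors and removes it afterwards, because those parameters only exist while the reparameterization is active. A small torch sketch of that apply-load-remove sequence (torch.nn.utils.weight_norm is deprecated in recent PyTorch in favor of parametrizations, but it matches the API the row relies on):

import torch
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3))  # splits .weight into weight_g / weight_v

# A checkpoint from a weight-normed model stores both pieces; they can only
# be copied while the reparameterization is in place.
checkpoint = {"weight_g": torch.ones_like(conv.weight_g), "weight_v": torch.randn_like(conv.weight_v)}
with torch.no_grad():
    conv.weight_g.copy_(checkpoint["weight_g"])
    conv.weight_v.copy_(checkpoint["weight_v"])

remove_weight_norm(conv)  # folds weight_g / weight_v back into a plain .weight
assert not hasattr(conv, "weight_g")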
'''simple docstring''' import torch from diffusers import DiffusionPipeline class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ): super().__init__() self.register_modules(unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ) def __call__( self ): lowerCamelCase__ = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,) lowerCamelCase__ = 1 lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ).sample lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample lowerCamelCase__ = scheduler_output - scheduler_output + torch.ones_like(_lowerCAmelCase ) return result
50
'''simple docstring''' import os from pathlib import Path def A__ ( ): from torch.utils.cpp_extension import load lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" lowerCamelCase__ = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
50
1
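Editor's note: the one-step pipeline in this row follows the standard predict-then-step pattern (the UNet predicts noise, the scheduler produces the previous sample). A schematic loop with stand-in callables, deliberately not real diffusers objects, so the sketch runs anywhere:

import torch

def denoise(sample, unet, scheduler, timesteps):
    # Generic reverse-diffusion loop: the model output at timestep t is turned
    # by the scheduler into the slightly less noisy sample for t - 1.
    for t in timesteps:
        model_output = unet(sample, t)
        sample = scheduler(model_output, t, sample)
    return sample

unet = lambda x, t: torch.zeros_like(x)      # stand-in: "predicts" zero noise
scheduler = lambda eps, t, x: x - 0.1 * eps  # stand-in: toy Euler-style update
out = denoise(torch.randn(1, 3, 8, 8), unet, scheduler, timesteps=range(10, 0, -1))
assert out.shape == (1, 3, 8, 8)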
'''simple docstring''' import functools def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ): # Validation if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ): raise ValueError("""The parameter days should be a list of integers""" ) if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ): raise ValueError("""The parameter costs should be a list of three integers""" ) if len(__lowerCAmelCase ) == 0: return 0 if min(__lowerCAmelCase ) <= 0: raise ValueError("""All days elements should be greater than 0""" ) if max(__lowerCAmelCase ) >= 366: raise ValueError("""All days elements should be less than 366""" ) lowerCamelCase__ = set(__lowerCAmelCase ) @functools.cache def dynamic_programming(__lowerCAmelCase : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
50
'''simple docstring''' def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ): lowerCamelCase__ = len(__lowerCAmelCase ) print("""The following activities are selected:""" ) # The first activity is always selected lowerCamelCase__ = 0 print(__lowerCAmelCase , end=""",""" ) # Consider the rest of the activities for j in range(__lowerCAmelCase ): # If this activity has start time greater than # or equal to the finish time of the previously # selected activity, then select it if start[j] >= finish[i]: print(__lowerCAmelCase , end=""",""" ) lowerCamelCase__ = j if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5] UpperCamelCase : int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
50
1
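Editor's note: the @functools.cache recursion in this row is the classic minimum-cost-tickets dynamic program. A compact restatement with descriptive names (chosen here, not from the row); the sample input is the standard one for this problem and evaluates to 11:

import functools

def mincost_tickets(days, costs):
    # costs = [1-day pass, 7-day pass, 30-day pass]
    travel_days = set(days)

    @functools.cache
    def best(day):
        if day > 365:
            return 0
        if day not in travel_days:
            return best(day + 1)  # no travel today: move on for free
        return min(
            costs[0] + best(day + 1),
            costs[1] + best(day + 7),
            costs[2] + best(day + 30),
        )

    return best(1)

assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11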
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : str = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) lowerCamelCase__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , __lowerCAmelCase ) if matches: lowerCamelCase__ = float(matches[1] ) lowerCamelCase__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowerCamelCase__ = 1001 lowerCamelCase__ = """imagenet-1k-id2label.json""" lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ) + 1: v for k, v in idalabel.items()} lowerCamelCase__ = """background""" lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} return config def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=False ): lowerCamelCase__ = get_mobilenet_va_config(__lowerCAmelCase ) # Load 🤗 model lowerCamelCase__ = MobileNetVaForImageClassification(__lowerCAmelCase ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowerCamelCase__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) lowerCamelCase__ = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": lowerCamelCase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": lowerCamelCase__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: lowerCamelCase__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print("""Pushing to the hub...""" ) lowerCamelCase__ = """google/""" + model_name image_processor.push_to_hub(__lowerCAmelCase ) model.push_to_hub(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase : Tuple = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
50
'''simple docstring'''

import warnings

from ..trainer import Trainer
from ..utils import logging


UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,_lowerCAmelCase ,)
        super().__init__(args=_lowerCAmelCase ,**_lowerCAmelCase )
50
1
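
As a sanity check on the converted weights, a minimal inference sketch follows; it assumes the checkpoint was pushed to the Hub as google/mobilenet_v1_1.0_224, per the push_to_hub branch of the conversion script above.

import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# assumption: the converted checkpoint lives at this Hub id
processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

# index 0 is the extra "background" class, so there are 1001 logits in total
print(model.config.id2label[logits.argmax(-1).item()])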
'''simple docstring'''

from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class UpperCamelCase__ :
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        raise NotImplementedError()

    def UpperCamelCase_ ( self ):
        raise NotImplementedError()


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,**_lowerCAmelCase ):
        lowerCamelCase__ = tokenizer
        lowerCamelCase__ = skip_prompt
        lowerCamelCase__ = decode_kwargs

        # variables used in the streaming process
        lowerCamelCase__ = []
        lowerCamelCase__ = 0
        lowerCamelCase__ = True

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            lowerCamelCase__ = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            lowerCamelCase__ = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        lowerCamelCase__ = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            lowerCamelCase__ = text[self.print_len :]
            lowerCamelCase__ = []
            lowerCamelCase__ = 0
        # If the last token is a CJK character, we print the characters.
        elif len(_lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            lowerCamelCase__ = text[self.print_len :]
            self.print_len += len(_lowerCAmelCase )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            lowerCamelCase__ = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(_lowerCAmelCase )

        self.on_finalized_text(_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            lowerCamelCase__ = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs )
            lowerCamelCase__ = text[self.print_len :]
            lowerCamelCase__ = []
            lowerCamelCase__ = 0
        else:
            lowerCamelCase__ = """"""

        lowerCamelCase__ = True
        self.on_finalized_text(_lowerCAmelCase ,stream_end=_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
        print(_lowerCAmelCase ,flush=_lowerCAmelCase ,end="""""" if not stream_end else None )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e_00 and cp <= 0x9f_ff)
            or (cp >= 0x34_00 and cp <= 0x4d_bf)  #
            or (cp >= 0x2_00_00 and cp <= 0x2_a6_df)  #
            or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f)  #
            or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f)  #
            or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af)  #
            or (cp >= 0xf9_00 and cp <= 0xfa_ff)
            or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f)  #
        ):  #
            return True

        return False


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,**_lowerCAmelCase ):
        super().__init__(_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase )
        lowerCamelCase__ = Queue()
        lowerCamelCase__ = None
        lowerCamelCase__ = timeout

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = False ):
        self.text_queue.put(_lowerCAmelCase ,timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal ,timeout=self.timeout )

    def __iter__( self ):
        return self

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
50
'''simple docstring'''

import inspect
import re
from hashlib import shaaaa
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def A__ ( __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = []
    for line in lines:
        lowerCamelCase__ = re.sub(R"""#.*""" , """""" , __lowerCAmelCase )  # remove comments
        if line:
            filtered_lines.append(__lowerCAmelCase )
    lowerCamelCase__ = """\n""".join(__lowerCAmelCase )

    # Make a hash from all this code
    lowerCamelCase__ = full_str.encode("""utf-8""" )
    return shaaaa(__lowerCAmelCase ).hexdigest()


# get importable module names and hash for caching
UpperCamelCase : Dict = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
UpperCamelCase : str = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

UpperCamelCase : List[Any] = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
UpperCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
50
1
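
A short usage sketch for the queue-backed iterator streamer above, following the standard generate-in-a-thread pattern; the gpt2 checkpoint and prompt are only stand-ins.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumption: any causal LM works here
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so it runs in a worker thread while the main thread consumes text
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()

generated = ""
for chunk in streamer:
    generated += chunk
thread.join()
print(generated)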
'''simple docstring'''

from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


UpperCamelCase : List[str] = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = 'ernie_m'
    _UpperCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self ,_lowerCAmelCase = 25_00_02 ,_lowerCAmelCase = 7_68 ,_lowerCAmelCase = 12 ,_lowerCAmelCase = 12 ,_lowerCAmelCase = 30_72 ,_lowerCAmelCase = "gelu" ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 5_14 ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 1E-05 ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=0.0 ,**_lowerCAmelCase ,):
        super().__init__(pad_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = classifier_dropout
        lowerCamelCase__ = is_decoder
        lowerCamelCase__ = act_dropout
50
'''simple docstring'''

import operator


def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
    lowerCamelCase__ = operator.lt if reverse else operator.gt
    lowerCamelCase__ = solution or []

    if not arr:
        return solution

    lowerCamelCase__ = [arr.pop(0 )]
    for i, item in enumerate(__lowerCAmelCase ):
        if _operator(__lowerCAmelCase , sublist[-1] ):
            sublist.append(__lowerCAmelCase )
            arr.pop(__lowerCAmelCase )

    # merging sublist into solution list
    if not solution:
        solution.extend(__lowerCAmelCase )
    else:
        while sublist:
            lowerCamelCase__ = sublist.pop(0 )
            for i, xx in enumerate(__lowerCAmelCase ):
                if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
                    solution.insert(__lowerCAmelCase , __lowerCAmelCase )
                    break
            else:
                solution.append(__lowerCAmelCase )

    strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
50
1
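
To make the first pass of the strand sort above concrete, here is a small sketch that extracts one ascending strand the same way the sort does; the helper name first_strand is hypothetical and only for illustration.

def first_strand(arr: list) -> tuple[list, list]:
    """Pop the first ascending strand from arr; returns (strand, leftovers)."""
    leftovers = list(arr)
    strand = [leftovers.pop(0)]
    for item in list(leftovers):
        if item > strand[-1]:
            strand.append(item)
            leftovers.remove(item)
    return strand, leftovers


# [4, 3, 5, 1, 2] yields strand [4, 5] and leftovers [3, 1, 2];
# the strand is merged into the solution and the sort recurses on the leftovers.
assert first_strand([4, 3, 5, 1, 2]) == ([4, 5], [3, 1, 2])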
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : List[str] = { 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Union[str, Any] = [ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
'''simple docstring'''

import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def A__ ( __lowerCAmelCase : dict ):
    return (data["data"], data["target"])


def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ):
    lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
    # Predict target for test data
    lowerCamelCase__ = xgb.predict(__lowerCAmelCase )
    lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 )
    return predictions


def A__ ( ):
    lowerCamelCase__ = fetch_california_housing()
    lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split(
        __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
    lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' )
    print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
50
1
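
A self-contained variant of the regression pipeline above, run on synthetic data so it needs no dataset download; the array shapes and noise level are illustrative assumptions, not values from the original row.

import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 8))
y = X @ rng.normal(size=8) + rng.normal(scale=0.1, size=500)  # linear target plus noise

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)

model = XGBRegressor(verbosity=0, random_state=42)
model.fit(X_train, y_train)
print(f"Mean Absolute Error : {mean_absolute_error(y_test, model.predict(X_test))}")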
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCamelCase : int = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A__ ( __lowerCAmelCase : str ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ): if args.student_type == "roberta": lowerCamelCase__ = False elif args.student_type == "gpt2": lowerCamelCase__ = False def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): if args.student_type == "roberta": lowerCamelCase__ = False def A__ ( ): lowerCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__lowerCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__lowerCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__lowerCAmelCase , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , 
type=__lowerCAmelCase , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__lowerCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__lowerCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__lowerCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__lowerCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__lowerCAmelCase , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__lowerCAmelCase , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__lowerCAmelCase , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__lowerCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__lowerCAmelCase , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__lowerCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__lowerCAmelCase , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__lowerCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__lowerCAmelCase , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__lowerCAmelCase , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5e-4 , type=__lowerCAmelCase , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=__lowerCAmelCase , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__lowerCAmelCase , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__lowerCAmelCase , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__lowerCAmelCase , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__lowerCAmelCase , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__lowerCAmelCase , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__lowerCAmelCase , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__lowerCAmelCase , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__lowerCAmelCase , default=4000 , help="""Checkpoint interval.""" ) lowerCamelCase__ = parser.parse_args() sanity_checks(__lowerCAmelCase ) # ARGS # init_gpu_params(__lowerCAmelCase ) set_seed(__lowerCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 ) git_log(args.dump_path ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[args.student_type] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowerCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowerCamelCase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowerCamelCase__ = tokenizer.all_special_tokens.index(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) lowerCamelCase__ = special_tok_ids lowerCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , """rb""" ) as fp: lowerCamelCase__ = pickle.load(__lowerCAmelCase ) if args.mlm: 
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , """rb""" ) as fp:
            lowerCamelCase__ = pickle.load(__lowerCAmelCase )
        lowerCamelCase__ = np.maximum(__lowerCAmelCase , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            lowerCamelCase__ = 0.0  # do not predict special tokens
        lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase )
    else:
        lowerCamelCase__ = None

    lowerCamelCase__ = LmSeqsDataset(params=__lowerCAmelCase , data=__lowerCAmelCase )
    logger.info("""Data loader created.""" )

    # STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''' )
    lowerCamelCase__ = student_config_class.from_pretrained(args.student_config )
    lowerCamelCase__ = True

    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        lowerCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCAmelCase )
    else:
        lowerCamelCase__ = student_model_class(__lowerCAmelCase )

    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''' )
    logger.info("""Student loaded.""" )

    # TEACHER #
    lowerCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCAmelCase )
    if args.n_gpu > 0:
        teacher.to(F'''cuda:{args.local_rank}''' )
    logger.info(F'''Teacher loaded from {args.teacher_name}.''' )

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(__lowerCAmelCase , __lowerCAmelCase )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(__lowerCAmelCase , __lowerCAmelCase )

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    lowerCamelCase__ = Distiller(
        params=__lowerCAmelCase , dataset=__lowerCAmelCase , token_probs=__lowerCAmelCase , student=__lowerCAmelCase , teacher=__lowerCAmelCase )
    distiller.train()
    logger.info("""Let's go get some drinks.""" )


if __name__ == "__main__":
    main()
50
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
            lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            return scores

        # with processor list
        def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
            lowerCamelCase__ = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
            return scores

        lowerCamelCase__ = jax.jit(_lowerCAmelCase )
        lowerCamelCase__ = jax.jit(_lowerCAmelCase )

        lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )

        # scores should be equal
        self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
1
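
A standalone sketch of the top-k warper the tests above exercise, using the same (input_ids, scores, cur_len) call signature; the toy logits are arbitrary.

import jax.numpy as jnp
from transformers.generation import FlaxTopKLogitsWarper

scores = jnp.array([[0.1, 0.4, 0.2, 0.9, 0.3]])  # batch of one, vocabulary of five
input_ids = jnp.zeros((1, 1), dtype=jnp.int32)   # unused by this warper, required by the signature

warped = FlaxTopKLogitsWarper(top_k=2)(input_ids, scores, cur_len=1)
# everything outside the top two logits is set to -inf, so only 0.9 and 0.4 survive
print(warped)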
'''simple docstring'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=a )
class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = field(default='question-answering-extractive' ,metadata={'include_in_asdict_even_if_is_default': True} )
    _UpperCamelCase = Features({'question': Value('string' ), 'context': Value('string' )} )
    _UpperCamelCase = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string' ),
                    'answer_start': Value('int32' ),
                }
            )
        }
    )
    _UpperCamelCase = "question"
    _UpperCamelCase = "context"
    _UpperCamelCase = "answers"

    @property
    def UpperCamelCase_ ( self ):
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase : Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
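
A minimal zero-shot sketch for the GroupViT classes exported above; the nvidia/groupvit-gcc-yfcc checkpoint name is an assumption based on the model's public release, and the candidate labels are arbitrary.

import requests
from PIL import Image
from transformers import AutoProcessor, GroupViTModel

# assumption: this is the released GroupViT checkpoint on the Hub
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
)
outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=1)  # image-text similarity as probabilities
print(probs)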