Dataset columns (value ranges are min to max over the split):

    code                      string  (length 86 to 54.5k)
    code_codestyle            int64   (0 to 371)
    style_context             string  (length 87 to 49.2k)
    style_context_codestyle   int64   (0 to 349)
    label                     int64   (0 to 1)
code:

from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with a truncated Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with a truncated Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
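A minimal sanity check for the sample above (assumes the restored names maclaurin_sin/maclaurin_cos; not part of the dataset row): after range reduction, the truncated series should agree closely with math.sin and math.cos.

import math

# With accuracy=30 the truncation error is far below 1e-9 for reduced theta.
assert abs(maclaurin_sin(10) - math.sin(10)) < 1e-9
assert abs(maclaurin_cos(5) - math.cos(5)) < 1e-9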
code_codestyle: 18
style_context:

'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
style_context_codestyle: 298
label: 0
code:

from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Value of the Gaussian (normal) density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
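A quick check of the density above (assumes the restored name gaussian; not part of the dataset row): at x = mu the value should equal 1 / sqrt(2 * pi * sigma**2).

from math import isclose, pi as _pi, sqrt as _sqrt

assert isclose(gaussian(0.0), 1 / _sqrt(2 * _pi))  # peak of the standard normal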
code_codestyle: 223
style_context:

import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
style_context_codestyle: 223
label: 1
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) a = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : str = None , lowerCAmelCase : list = None ): lowerCAmelCase = None lowerCAmelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) ) lowerCAmelCase = os.path.abspath("""examples""" ) for item in os.listdir(lowerCAmelCase ): if item not in EXCLUDE_EXAMPLES: lowerCAmelCase = os.path.join(lowerCAmelCase , lowerCAmelCase ) if os.path.isfile(lowerCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=lowerCAmelCase , feature_script=lowerCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ): lowerCAmelCase = compare_against_test( os.path.join(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) lowerCAmelCase = """\n""".join(lowerCAmelCase ) if special_strings is not None: for string in special_strings: lowerCAmelCase = diff.replace(lowerCAmelCase , """""" ) self.assertEqual(lowerCAmelCase , """""" ) def __lowercase ( self : Union[str, Any] ): self.one_complete_example("""complete_nlp_example.py""" , lowerCAmelCase ) self.one_complete_example("""complete_nlp_example.py""" , lowerCAmelCase ) def __lowercase ( self : Optional[int] ): lowerCAmelCase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) ) lowerCAmelCase = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""" , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) self.one_complete_example("""complete_cv_example.py""" , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class SCREAMING_SNAKE_CASE__ ( _a ): _a = False @classmethod def __lowercase ( cls : List[Any] ): super().setUpClass() lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = os.path.join(cls._tmpdir , """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def __lowercase ( cls : Optional[Any] ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __lowercase ( self : Optional[int] ): lowerCAmelCase = f''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() 
run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) ) def __lowercase ( self : Tuple ): lowerCAmelCase = f''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() lowerCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) ) def __lowercase ( self : Optional[int] ): lowerCAmelCase = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} '''.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase ) self.assertNotIn("""epoch 0:""" , lowerCAmelCase ) self.assertIn("""epoch 1:""" , lowerCAmelCase ) def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} '''.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase ) if torch.cuda.is_available(): lowerCAmelCase = torch.cuda.device_count() else: lowerCAmelCase = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""" , lowerCAmelCase ) self.assertIn("""epoch 1:""" , lowerCAmelCase ) else: self.assertIn("""epoch 0:""" , lowerCAmelCase ) self.assertIn("""epoch 1:""" , lowerCAmelCase ) @slow def __lowercase ( self : str ): lowerCAmelCase = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ): lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase ) lowerCAmelCase = re.findall("""({.+})""" , lowerCAmelCase ) lowerCAmelCase = [r for r in results if """accuracy""" in r][-1] lowerCAmelCase = ast.literal_eval(lowerCAmelCase ) self.assertGreaterEqual(results["""accuracy"""] , 0.75 ) def __lowercase ( self : List[Any] ): lowerCAmelCase = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def __lowercase ( self : Any ): with tempfile.TemporaryDirectory() as tmpdir: lowerCAmelCase = f''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , """tracking""" ) ) ) def __lowercase ( self : List[Any] ): lowerCAmelCase = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args + testargs ) def __lowercase ( self : Any ): lowerCAmelCase = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
code_codestyle: 155
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 155
label: 1
"""simple docstring""" from typing import Any def __lowercase ( snake_case_ : list ,snake_case_ : list ,snake_case_ : dict ,snake_case_ : dict ,snake_case_ : dict ,) ->list: '''simple docstring''' _validation( snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,) # Creates data structures and fill initial step __A : dict = {} __A : dict = {} for state in states_space: __A : str = observations_space[0] __A : str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __A : Dict = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 ,len(snake_case_ ) ): __A : Any = observations_space[o] __A : Any = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __A : Tuple = '''''' __A : Union[str, Any] = -1 for k_state in states_space: __A : Optional[Any] = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __A : List[str] = probability __A : Union[str, Any] = k_state # Update probabilities and pointers dicts __A : Optional[Any] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __A : Tuple = arg_max # The final observation __A : Dict = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __A : Union[str, Any] = '''''' __A : Optional[Any] = -1 for k_state in states_space: __A : Any = probabilities[(k_state, final_observation)] if probability > max_probability: __A : Any = probability __A : Optional[int] = k_state __A : Optional[int] = arg_max # Process pointers backwards __A : int = last_state __A : int = [] for o in range(len(snake_case_ ) - 1 ,-1 ,-1 ): result.append(snake_case_ ) __A : Optional[int] = pointers[previous, observations_space[o]] result.reverse() return result def __lowercase ( snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,) ->None: '''simple docstring''' _validate_not_empty( snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,) _validate_lists(snake_case_ ,snake_case_ ) _validate_dicts( snake_case_ ,snake_case_ ,snake_case_ ) def __lowercase ( snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,) ->None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def __lowercase ( snake_case_ : Any ,snake_case_ : Any ) ->None: '''simple docstring''' _validate_list(snake_case_ ,'''observations_space''' ) _validate_list(snake_case_ ,'''states_space''' ) def __lowercase ( snake_case_ : Any ,snake_case_ : str ) ->None: '''simple docstring''' if not isinstance(_object ,snake_case_ ): __A : Optional[int] = F"""{var_name} must be a list""" raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ ,snake_case_ ): __A : Optional[Any] = F"""{var_name} must be a list of strings""" raise ValueError(snake_case_ ) def __lowercase ( snake_case_ : Any ,snake_case_ : Any ,snake_case_ : Any ,) ->None: '''simple docstring''' _validate_dict(snake_case_ ,'''initial_probabilities''' ,snake_case_ ) _validate_nested_dict(snake_case_ ,'''transition_probabilities''' ) _validate_nested_dict(snake_case_ ,'''emission_probabilities''' ) def __lowercase ( 
snake_case_ : Any ,snake_case_ : str ) ->None: '''simple docstring''' _validate_dict(_object ,snake_case_ ,snake_case_ ) for x in _object.values(): _validate_dict(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ) def __lowercase ( snake_case_ : Any ,snake_case_ : str ,snake_case_ : type ,snake_case_ : bool = False ) ->None: '''simple docstring''' if not isinstance(_object ,snake_case_ ): __A : int = F"""{var_name} must be a dict""" raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ ,snake_case_ ) for x in _object ): __A : List[Any] = F"""{var_name} all keys must be strings""" raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ ,snake_case_ ) for x in _object.values() ): __A : int = '''nested dictionary ''' if nested else '''''' __A : int = F"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
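A usage sketch for the Viterbi implementation above, using the classic healthy/fever example (inputs are illustrative and assume the restored name viterbi):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# Most likely hidden-state sequence for the observations:
print(viterbi(observations, states, initial, transition, emission))
# ['Healthy', 'Healthy', 'Fever']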
code_codestyle: 291
"""simple docstring""" a_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) a_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowercase ( snake_case_ : float ,snake_case_ : str ,snake_case_ : str ) ->float: '''simple docstring''' __A : Tuple = from_type.lower().strip('''s''' ) __A : Optional[int] = to_type.lower().strip('''s''' ) __A : List[str] = UNIT_SYMBOL.get(snake_case_ ,snake_case_ ) __A : Any = UNIT_SYMBOL.get(snake_case_ ,snake_case_ ) if from_sanitized not in METRIC_CONVERSION: __A : int = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(snake_case_ )}""" ) raise ValueError(snake_case_ ) if to_sanitized not in METRIC_CONVERSION: __A : str = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(snake_case_ )}""" ) raise ValueError(snake_case_ ) __A : Optional[Any] = METRIC_CONVERSION[from_sanitized] __A : Optional[int] = METRIC_CONVERSION[to_sanitized] __A : Union[str, Any] = 1 if from_exponent > to_exponent: __A : Dict = from_exponent - to_exponent else: __A : Union[str, Any] = -(to_exponent - from_exponent) return value * pow(10 ,snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 291
label: 1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : List[Any] ="data2vec-vision" def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=[3, 5, 7, 11] , snake_case__=[1, 2, 3, 6] , snake_case__=True , snake_case__=0.4 , snake_case__=256 , snake_case__=1 , snake_case__=False , snake_case__=255 , **snake_case__ , ): """simple docstring""" super().__init__(**snake_case__ ) lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : Tuple = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase : int = initializer_range lowerCAmelCase : Dict = layer_norm_eps lowerCAmelCase : Optional[int] = image_size lowerCAmelCase : Optional[Any] = patch_size lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : Union[str, Any] = use_mask_token lowerCAmelCase : str = use_absolute_position_embeddings lowerCAmelCase : Any = use_relative_position_bias lowerCAmelCase : List[str] = use_shared_relative_position_bias lowerCAmelCase : str = layer_scale_init_value lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : Any = use_mean_pooling # decode head attributes (semantic segmentation) lowerCAmelCase : Optional[int] = out_indices lowerCAmelCase : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) lowerCAmelCase : str = use_auxiliary_head lowerCAmelCase : int = auxiliary_loss_weight lowerCAmelCase : Tuple = auxiliary_channels lowerCAmelCase : List[str] = auxiliary_num_convs lowerCAmelCase : Tuple = auxiliary_concat_input lowerCAmelCase : List[str] = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Union[str, Any] =version.parse("1.11" ) @property def lowercase__ ( self ): """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowercase__ ( self ): """simple docstring""" return 1e-4
code_codestyle: 108
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: if isinstance(__lowerCAmelCase , torch.Tensor ): return image elif isinstance(__lowerCAmelCase , PIL.Image.Image ): SCREAMING_SNAKE_CASE__ : Any = [image] if isinstance(image[0] , PIL.Image.Image ): SCREAMING_SNAKE_CASE__ : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate(__lowerCAmelCase , axis=0 ) SCREAMING_SNAKE_CASE__ : List[str] = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Optional[Any] = image.transpose(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : Tuple = 2.0 * image - 1.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(__lowerCAmelCase ) elif isinstance(image[0] , torch.Tensor ): SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(__lowerCAmelCase , dim=0 ) return image def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.9_995 ) -> Union[str, Any]: if not isinstance(__lowerCAmelCase , np.ndarray ): SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : int = va.device SCREAMING_SNAKE_CASE__ : str = va.cpu().numpy() SCREAMING_SNAKE_CASE__ : str = va.cpu().numpy() SCREAMING_SNAKE_CASE__ : Any = np.sum(va * va / (np.linalg.norm(__lowerCAmelCase ) * np.linalg.norm(__lowerCAmelCase )) ) if np.abs(__lowerCAmelCase ) > DOT_THRESHOLD: SCREAMING_SNAKE_CASE__ : Tuple = (1 - t) * va + t * va else: SCREAMING_SNAKE_CASE__ : Optional[int] = np.arccos(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.sin(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = theta_a * t SCREAMING_SNAKE_CASE__ : Tuple = np.sin(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = np.sin(theta_a - theta_t ) / sin_theta_a SCREAMING_SNAKE_CASE__ : Optional[int] = sin_theta_t / sin_theta_a SCREAMING_SNAKE_CASE__ : List[Any] = sa * va + sa * va if inputs_are_torch: SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) return va def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any: SCREAMING_SNAKE_CASE__ : Tuple = F.normalize(__lowerCAmelCase , dim=-1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = F.normalize(__lowerCAmelCase , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: for param in model.parameters(): SCREAMING_SNAKE_CASE__ : int = value class __a (UpperCamelCase_): '''simple docstring''' def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ( feature_extractor.size if 
isinstance(feature_extractor.size , _a ) else feature_extractor.size["""shortest_edge"""] ) SCREAMING_SNAKE_CASE__ : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , _a ) set_requires_grad(self.clip_model , _a ) def _a ( self , _a = "auto" ) -> Dict: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_a ) def _a ( self ) -> List[str]: """simple docstring""" self.enable_attention_slicing(_a ) def _a ( self ) -> List[Any]: """simple docstring""" set_requires_grad(self.vae , _a ) def _a ( self ) -> Dict: """simple docstring""" set_requires_grad(self.vae , _a ) def _a ( self ) -> Optional[Any]: """simple docstring""" set_requires_grad(self.unet , _a ) def _a ( self ) -> int: """simple docstring""" set_requires_grad(self.unet , _a ) def _a ( self , _a , _a , _a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = min(int(num_inference_steps * strength ) , _a ) SCREAMING_SNAKE_CASE__ : Optional[int] = max(num_inference_steps - init_timestep , 0 ) SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _a ( self , _a , _a , _a , _a , _a , _a=None ) -> Optional[Any]: """simple docstring""" if not isinstance(_a , torch.Tensor ): raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = image.to(device=_a , dtype=_a ) if isinstance(_a , _a ): SCREAMING_SNAKE_CASE__ : int = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a ) ] SCREAMING_SNAKE_CASE__ : Tuple = torch.cat(_a , dim=0 ) else: SCREAMING_SNAKE_CASE__ : int = self.vae.encode(_a ).latent_dist.sample(_a ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.18_215 * init_latents SCREAMING_SNAKE_CASE__ : List[str] = init_latents.repeat_interleave(_a , dim=0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a ) # get latents SCREAMING_SNAKE_CASE__ : Any = self.scheduler.add_noise(_a , _a , _a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = init_latents return latents def _a ( self , _a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.coca_transform(_a ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): SCREAMING_SNAKE_CASE__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) SCREAMING_SNAKE_CASE__ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" ) def _a ( self , _a , _a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.preprocess(_a ) SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() SCREAMING_SNAKE_CASE__ : Any = self.clip_model.get_image_features(_a ) SCREAMING_SNAKE_CASE__ : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_embeddings_clip.repeat_interleave(_a , dim=0 ) return image_embeddings_clip 
@torch.enable_grad() def _a ( self , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = latents.detach().requires_grad_() SCREAMING_SNAKE_CASE__ : str = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual SCREAMING_SNAKE_CASE__ : Any = self.unet(_a , _a , encoder_hidden_states=_a ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.alphas_cumprod[timestep] SCREAMING_SNAKE_CASE__ : List[Any] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf SCREAMING_SNAKE_CASE__ : Optional[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 SCREAMING_SNAKE_CASE__ : List[str] = torch.sqrt(_a ) SCREAMING_SNAKE_CASE__ : Dict = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _a ): SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.sigmas[index] SCREAMING_SNAKE_CASE__ : Dict = latents - sigma * noise_pred else: raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 / 0.18_215 * sample SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vae.decode(_a ).sample SCREAMING_SNAKE_CASE__ : Any = (image / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Any = transforms.Resize(self.feature_extractor_size )(_a ) SCREAMING_SNAKE_CASE__ : Dict = self.normalize(_a ).to(latents.dtype ) SCREAMING_SNAKE_CASE__ : Tuple = self.clip_model.get_image_features(_a ) SCREAMING_SNAKE_CASE__ : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale SCREAMING_SNAKE_CASE__ : Optional[Any] = -torch.autograd.grad(_a , _a )[0] if isinstance(self.scheduler , _a ): SCREAMING_SNAKE_CASE__ : Any = latents.detach() + grads * (sigma**2) SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred_original else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = noise_pred_original - torch.sqrt(_a ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ) -> int: """simple docstring""" if isinstance(_a , _a ) and len(_a ) != batch_size: raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(_a , torch.Generator ) and batch_size > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = [generator] + [None] * (batch_size - 1) SCREAMING_SNAKE_CASE__ : List[Any] = [ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] SCREAMING_SNAKE_CASE__ : Optional[int] = [x[0] for x in coca_is_none if x[1]] SCREAMING_SNAKE_CASE__ : Union[str, Any] = """, """.join(_a ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_a ): raise ValueError( f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' f'''Set prompt or pass Coca 
[{coca_is_none_str}] to DiffusionPipeline.''' ) SCREAMING_SNAKE_CASE__ : Any = self.get_image_description(_a ) if style_prompt is None: if len(_a ): raise ValueError( f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_description(_a ) # get prompt text embeddings for content and style SCREAMING_SNAKE_CASE__ : Any = self.tokenizer( _a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE__ : Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer( _a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = slerp(_a , _a , _a ) # duplicate text embeddings for each generation per prompt SCREAMING_SNAKE_CASE__ : int = text_embeddings.repeat_interleave(_a , dim=0 ) # set timesteps SCREAMING_SNAKE_CASE__ : Union[str, Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) SCREAMING_SNAKE_CASE__ : Tuple = {} if accepts_offset: SCREAMING_SNAKE_CASE__ : List[str] = 1 self.scheduler.set_timesteps(_a , **_a ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_timesteps(_a , _a , self.device ) SCREAMING_SNAKE_CASE__ : List[str] = timesteps[:1].repeat(_a ) # Preprocess image SCREAMING_SNAKE_CASE__ : str = preprocess(_a , _a , _a ) SCREAMING_SNAKE_CASE__ : Dict = self.prepare_latents( _a , _a , _a , text_embeddings.dtype , self.device , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = preprocess(_a , _a , _a ) SCREAMING_SNAKE_CASE__ : Any = self.prepare_latents( _a , _a , _a , text_embeddings.dtype , self.device , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = slerp(_a , _a , _a ) if clip_guidance_scale > 0: SCREAMING_SNAKE_CASE__ : List[str] = self.get_clip_image_embeddings(_a , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_clip_image_embeddings(_a , _a ) SCREAMING_SNAKE_CASE__ : Dict = slerp( _a , _a , _a ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. SCREAMING_SNAKE_CASE__ : str = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE__ : Union[str, Any] = content_text_input.input_ids.shape[-1] SCREAMING_SNAKE_CASE__ : str = self.tokenizer([""""""] , padding="""max_length""" , max_length=_a , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt SCREAMING_SNAKE_CASE__ : Tuple = uncond_embeddings.repeat_interleave(_a , dim=0 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. SCREAMING_SNAKE_CASE__ : Dict = (batch_size, self.unet.config.in_channels, height // 8, width // 8) SCREAMING_SNAKE_CASE__ : Any = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to( self.device ) else: SCREAMING_SNAKE_CASE__ : Any = torch.randn(_a , generator=_a , device=self.device , dtype=_a ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE__ : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE__ : str = {} if accepts_eta: SCREAMING_SNAKE_CASE__ : Optional[Any] = eta # check if the scheduler accepts generator SCREAMING_SNAKE_CASE__ : int = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: SCREAMING_SNAKE_CASE__ : Optional[Any] = generator with self.progress_bar(total=_a ): for i, t in enumerate(_a ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual SCREAMING_SNAKE_CASE__ : List[Any] = self.unet(_a , _a , encoder_hidden_states=_a ).sample # perform classifier free guidance if do_classifier_free_guidance: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: SCREAMING_SNAKE_CASE__ : List[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.cond_fn( _a , _a , _a , _a , _a , _a , _a , ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE__ : Any = self.scheduler.step(_a , _a , _a , **_a ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor SCREAMING_SNAKE_CASE__ : List[Any] = 1 / 0.18_215 * latents SCREAMING_SNAKE_CASE__ : int = self.vae.decode(_a ).sample SCREAMING_SNAKE_CASE__ : str = (image / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE__ : int 
= self.numpy_to_pil(_a ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
style_context_codestyle: 132
label: 0
code:

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
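A brute-force cross-check one can run against the recursive counter above (assumes the restored names; a number n is reversible when n + reverse(n) has only odd digits, and n may not end in 0):

def brute_force(limit: int) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 == 0:  # reversal would have a leading zero
            continue
        if all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
            count += 1
    return count


print(brute_force(1000), solution(3))  # both should print 120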
code_codestyle: 247
style_context:

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt from a repo, returning its contents."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
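A small usage sketch (assumes the restored name download_prompt; this branch needs no network, since a prompt containing whitespace is returned unchanged):

prompt = download_prompt("Translate <<task>> into French.", agent_name="my-agent")
assert prompt == "Translate <<task>> into French."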
style_context_codestyle: 247
label: 1
"""simple docstring""" def a__ ( __SCREAMING_SNAKE_CASE ) -> list[int]: __lowerCAmelCase: Optional[int] = [0 for i in range(len(__SCREAMING_SNAKE_CASE ) )] # initialize interval's left pointer and right pointer __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = 0, 0 for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): # case when current index is inside the interval if i <= right_pointer: __lowerCAmelCase: Any = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __lowerCAmelCase: List[str] = min_edge while go_next(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __lowerCAmelCase , __lowerCAmelCase: Tuple = i, i + z_result[i] - 1 return z_result def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool: return i + z_result[i] < len(__SCREAMING_SNAKE_CASE ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int: __lowerCAmelCase: Tuple = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __lowerCAmelCase: int = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(__SCREAMING_SNAKE_CASE ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 217
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json", } class snake_case ( __snake_case ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = """switch_transformers""" SCREAMING_SNAKE_CASE_ : Tuple = ["""past_key_values"""] SCREAMING_SNAKE_CASE_ : Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : List[str] , UpperCamelCase__ : List[str]=3_2_1_2_8 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : Union[str, Any]=6_4 , UpperCamelCase__ : Optional[int]=2_0_4_8 , UpperCamelCase__ : Dict=6_4 , UpperCamelCase__ : List[str]=1_2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Dict=1_2 , UpperCamelCase__ : List[str]=8 , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=0.01 , UpperCamelCase__ : Optional[int]="float32" , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : Union[str, Any]=1_2_8 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=1e-6 , UpperCamelCase__ : Optional[Any]=0.001 , UpperCamelCase__ : Dict=0.001 , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : str="relu" , UpperCamelCase__ : int=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str=1 , **UpperCamelCase__ : Tuple , )-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: int = vocab_size __lowerCAmelCase: str = d_model __lowerCAmelCase: str = d_kv __lowerCAmelCase: str = d_ff __lowerCAmelCase: List[str] = num_sparse_encoder_layers __lowerCAmelCase: List[Any] = num_layers __lowerCAmelCase: Optional[Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowerCAmelCase: Tuple = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __lowerCAmelCase: int = self.num_layers // self.num_sparse_encoder_layers else: __lowerCAmelCase: Union[str, Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: __lowerCAmelCase: Dict = self.num_decoder_layers // self.num_sparse_decoder_layers else: __lowerCAmelCase: Any = self.num_decoder_layers # HACK: this will create 0 sparse layers __lowerCAmelCase: Dict = num_heads __lowerCAmelCase: Dict = num_experts __lowerCAmelCase: Any = expert_capacity __lowerCAmelCase: List[Any] = router_bias __lowerCAmelCase: int = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") __lowerCAmelCase: Dict = router_dtype __lowerCAmelCase: Optional[Any] = router_ignore_padding_tokens __lowerCAmelCase: Union[str, Any] = relative_attention_num_buckets __lowerCAmelCase: str = relative_attention_max_distance __lowerCAmelCase: Optional[int] = dropout_rate __lowerCAmelCase: Optional[Any] = layer_norm_epsilon __lowerCAmelCase: int = initializer_factor __lowerCAmelCase: Tuple = feed_forward_proj __lowerCAmelCase: int = use_cache __lowerCAmelCase: int = add_router_probs __lowerCAmelCase: Optional[Any] = router_z_loss_coef __lowerCAmelCase: Dict = router_aux_loss_coef __lowerCAmelCase: Union[str, Any] = self.feed_forward_proj.split("-") __lowerCAmelCase: Tuple = act_info[-1] __lowerCAmelCase: str = act_info[0] == "gated" if len(UpperCamelCase__) > 1 and act_info[0] != "gated" or len(UpperCamelCase__) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'") # for backwards compatibility if feed_forward_proj == "gated-gelu": __lowerCAmelCase: List[str] = "gelu_new" super().__init__( pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ , )
style_context_codestyle: 217
label: 1
code:

def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing one digit from num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
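A short usage sketch (assumes the restored name remove_digit; not part of the dataset row):

print(remove_digit(152))   # 52  (drop the '1')
print(remove_digit(6385))  # 685 (drop the '3')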
code_codestyle: 360
style_context:

from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word with its letters sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
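The signature trick above can be tried without the words.txt dependency; a minimal in-memory sketch:

import collections

words = ["listen", "silent", "enlist", "google"]
by_sig = collections.defaultdict(list)
for w in words:
    by_sig["".join(sorted(w))].append(w)
print(by_sig["".join(sorted("listen"))])  # ['listen', 'silent', 'enlist']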
style_context_codestyle: 305
label: 0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = '''▁''' SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''sentencepiece.bpe.model'''} SCREAMING_SNAKE_CASE_ = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } SCREAMING_SNAKE_CASE_ = { '''facebook/mbart-large-50-one-to-many-mmt''': 1_024, } # fmt: off SCREAMING_SNAKE_CASE_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class lowerCAmelCase_ ( A__ ): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = ['''input_ids''', '''attention_mask'''] _snake_case = [] _snake_case = [] def __init__( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_ = None , **snake_case_ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=snake_case_ , tgt_lang=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , ) __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case_ ) ) __lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowerCAmelCase = 1 __lowerCAmelCase = len(self.sp_model ) __lowerCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case_ ) } __lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} __lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __lowerCAmelCase = src_lang if src_lang is not None else """en_XX""" __lowerCAmelCase = self.lang_code_to_id[self._src_lang] __lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A__ ( self ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def A__ ( self ) -> str: return self._src_lang @src_lang.setter def A__ ( self , snake_case_ ) -> None: __lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Dict: __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None return state def __setstate__( self , snake_case_ ) -> None: __lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A__ ( self ) -> Dict: __lowerCAmelCase = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A__ ( self , snake_case_ ) -> List[str]: return self.sp_model.encode(snake_case_ , out_type=snake_case_ ) def A__ ( self , snake_case_ ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCAmelCase = self.sp_model.PieceToId(snake_case_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , snake_case_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A__ ( self , snake_case_ ) -> Optional[Any]: __lowerCAmelCase = [] __lowerCAmelCase = """""" __lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case_ ) + token __lowerCAmelCase = True __lowerCAmelCase = [] else: current_sub_tokens.append(snake_case_ ) __lowerCAmelCase = False out_string += self.sp_model.decode(snake_case_ ) return out_string.strip() def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]: if not os.path.isdir(snake_case_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case_ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case_ , """wb""" ) as 
fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(snake_case_ ) return (out_vocab_file,) def A__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ ) __lowerCAmelCase = [1] * len(self.prefix_tokens ) __lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __lowerCAmelCase = src_lang __lowerCAmelCase = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ ) __lowerCAmelCase = self.convert_tokens_to_ids(snake_case_ ) __lowerCAmelCase = tgt_lang_id return inputs def A__ ( self , snake_case_ , snake_case_ = "en_XX" , snake_case_ = None , snake_case_ = "ro_RO" , **snake_case_ , ) -> BatchEncoding: __lowerCAmelCase = src_lang __lowerCAmelCase = tgt_lang return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ ) def A__ ( self ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def A__ ( self ) -> str: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A__ ( self , snake_case_ ) -> None: __lowerCAmelCase = self.lang_code_to_id[src_lang] __lowerCAmelCase = [self.cur_lang_code_id] __lowerCAmelCase = [self.eos_token_id] def A__ ( self , snake_case_ ) -> None: __lowerCAmelCase = self.lang_code_to_id[tgt_lang] __lowerCAmelCase = [self.cur_lang_code_id] __lowerCAmelCase = [self.eos_token_id]
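A minimal usage sketch for the tokenizer above, assuming network access to the `facebook/mbart-large-50-one-to-many-mmt` checkpoint listed in the pretrained map (the example sentence is illustrative):

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
ids = batch["input_ids"][0].tolist()
# Source-side format produced by set_src_lang_special_tokens: [src_lang_code] X [</s>]
print(tokenizer.convert_ids_to_tokens(ids[0]))   # 'en_XX'
print(tokenizer.convert_ids_to_tokens(ids[-1]))  # '</s>'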
301
"""simple docstring""" import sys import turtle def lowercase (_lowerCAmelCase , _lowerCAmelCase ): return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 ) triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 ) triangle(_lowerCAmelCase , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , get_mid(_lowerCAmelCase , _lowerCAmelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) SCREAMING_SNAKE_CASE_ = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
301
1
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Markov chain on a graph where each directed edge carries a transition probability."""

    def __init__(self):
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node according to the outgoing probabilities of `node`."""
        current_probability = 0.0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    """Run a random walk of `steps` transitions from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
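A small, self-contained usage sketch for `get_transitions` above; the node names and probabilities are illustrative, and outgoing probabilities from each node sum to 1 so `transition` always returns a destination:

example_transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visit_counts = get_transitions("a", example_transitions, 5000)
# The stationary distribution of this chain puts five times more mass on "a"
# than on "b", so this comparison holds with overwhelming probability
# (it is still a random outcome, not a guarantee).
print(visit_counts["a"] > visit_counts["b"])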
227
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" a = StableDiffusionControlNetImgaImgPipeline a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) a = IMAGE_TO_IMAGE_IMAGE_PARAMS def A ( self : Tuple): torch.manual_seed(0) _A : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0) _A : Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0) _A : Any = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _A : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) _A : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A : Any = CLIPTextModel(SCREAMING_SNAKE_CASE) _A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') _A : Tuple = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=0): if str(SCREAMING_SNAKE_CASE).startswith('mps'): _A : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _A : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _A : Union[str, Any] = 2 _A : Tuple = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) 
, ) _A : Tuple = floats_tensor(control_image.shape , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0] _A : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE)).convert('RGB').resize((64, 64)) _A : Any = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def A ( self : str): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A ( self : Tuple): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def A ( self : int): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class __lowerCamelCase ( a_ , a_ , unittest.TestCase ): """simple docstring""" a = StableDiffusionControlNetImgaImgPipeline a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def A ( self : List[str]): torch.manual_seed(0) _A : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0) def init_weights(SCREAMING_SNAKE_CASE : Union[str, Any]): if isinstance(SCREAMING_SNAKE_CASE , torch.nn.Convad): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) _A : int = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE) torch.manual_seed(0) _A : str = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(SCREAMING_SNAKE_CASE) torch.manual_seed(0) _A : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _A : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) _A : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE) _A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') _A : List[str] = MultiControlNetModel([controlneta, controlneta]) _A : List[str] = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : 
Dict , SCREAMING_SNAKE_CASE : List[str]=0): if str(SCREAMING_SNAKE_CASE).startswith('mps'): _A : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _A : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _A : Union[str, Any] = 2 _A : List[str] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=SCREAMING_SNAKE_CASE , device=torch.device(SCREAMING_SNAKE_CASE) , ), ] _A : str = floats_tensor(control_image[0].shape , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1)[0] _A : Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE)).convert('RGB').resize((64, 64)) _A : Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def A ( self : Tuple): _A : List[str] = self.get_dummy_components() _A : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE) pipe.to(SCREAMING_SNAKE_CASE) _A : int = 10.0 _A : Union[str, Any] = 4 _A : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE) _A : List[Any] = steps _A : List[str] = scale _A : int = pipe(**SCREAMING_SNAKE_CASE)[0] _A : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE) _A : Union[str, Any] = steps _A : Any = scale _A : Dict = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=0.1 , control_guidance_end=0.2)[0] _A : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE) _A : str = steps _A : List[Any] = scale _A : int = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0] _A : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE) _A : Tuple = steps _A : Tuple = scale _A : str = pipe(**SCREAMING_SNAKE_CASE , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 def A ( self : Optional[Any]): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A ( self : Any): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def A ( self : Dict): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def A ( self : str): _A : Optional[int] = self.get_dummy_components() _A : Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE) pipe.to(SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(SCREAMING_SNAKE_CASE) except NotImplementedError: pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def A ( self : Optional[Any]): super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Any): _A : Dict = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny') _A : List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 
'runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE , controlnet=SCREAMING_SNAKE_CASE) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _A : List[Any] = torch.Generator(device='cpu').manual_seed(0) _A : List[Any] = 'evil space-punk bird' _A : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512)) _A : List[str] = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512)) _A : Dict = pipe( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , control_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type='np' , num_inference_steps=50 , strength=0.6 , ) _A : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) _A : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy') assert np.abs(expected_image - image).max() < 9e-2
227
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler: one Euler-Maruyama step of the reverse-time SDE per call."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
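A shape-level smoke test for the scheduler above, runnable in the same module context (the `score` here is random noise standing in for a trained model's output, not a meaningful prediction):

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=10)

x = torch.randn(1, 3, 8, 8)
t = scheduler.timesteps[0]        # 0-d tensor; step_pred flattens/broadcasts it
score = torch.randn_like(x)       # placeholder for a score model's output

x, x_mean = scheduler.step_pred(score, x, t)
print(x.shape, x_mean.shape)      # both torch.Size([1, 3, 8, 8])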
82
"""EfficientFormer model configuration."""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
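An instantiation sketch for the config above; every argument defaults to the EfficientFormer-L1 values, so only overrides need to be passed:

config = EfficientFormerConfig(num_hidden_layers=5, num_attention_heads=8)
print(config.model_type)     # "efficientformer"
print(config.hidden_sizes)   # [48, 96, 224, 448]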
296
0
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase ='platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __magic_name__ : UpperCAmelCase =PegasusConfig UpperCAmelCase ={} UpperCAmelCase ="gelu" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=False , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case=0.1 , snake_case=0.1 , snake_case=2_0 , snake_case=2 , snake_case=1 , snake_case=0 , ) -> Dict: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =parent _UpperCAmelCase : Union[str, Any] =batch_size _UpperCAmelCase : List[Any] =seq_length _UpperCAmelCase : Optional[Any] =is_training _UpperCAmelCase : Dict =use_labels _UpperCAmelCase : Union[str, Any] =vocab_size _UpperCAmelCase : int =hidden_size _UpperCAmelCase : Any =num_hidden_layers _UpperCAmelCase : int =num_attention_heads _UpperCAmelCase : str =intermediate_size _UpperCAmelCase : Union[str, Any] =hidden_dropout_prob _UpperCAmelCase : Dict =attention_probs_dropout_prob _UpperCAmelCase : str =max_position_embeddings _UpperCAmelCase : List[str] =eos_token_id _UpperCAmelCase : Dict =pad_token_id _UpperCAmelCase : str =bos_token_id def lowerCAmelCase ( self) -> Any: '''simple docstring''' _UpperCAmelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) _UpperCAmelCase : int =np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) _UpperCAmelCase : int =np.concatenate([input_ids, eos_tensor] , axis=1) _UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase : str =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase : List[Any] =prepare_pegasus_inputs_dict(snake_case , snake_case , snake_case) return config, inputs_dict def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> int: '''simple docstring''' _UpperCAmelCase : List[str] =2_0 _UpperCAmelCase : List[Any] =model_class_name(snake_case) _UpperCAmelCase : List[str] =model.encode(inputs_dict['input_ids']) _UpperCAmelCase , _UpperCAmelCase : int =( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _UpperCAmelCase : str =model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case) 
_UpperCAmelCase : Optional[int] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4') _UpperCAmelCase : Any =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase : Optional[int] =model.decode( decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , ) _UpperCAmelCase : int =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') _UpperCAmelCase : Dict =model.decode( decoder_input_ids[:, -1:] , snake_case , decoder_attention_mask=snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case , ) _UpperCAmelCase : Dict =model.decode(snake_case , snake_case) _UpperCAmelCase : Union[str, Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}") def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> Tuple: '''simple docstring''' _UpperCAmelCase : Dict =2_0 _UpperCAmelCase : Union[str, Any] =model_class_name(snake_case) _UpperCAmelCase : Any =model.encode(inputs_dict['input_ids']) _UpperCAmelCase , _UpperCAmelCase : int =( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _UpperCAmelCase : Any =jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) _UpperCAmelCase : Dict =model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case) _UpperCAmelCase : Any =jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _UpperCAmelCase : str =model.decode( decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , ) _UpperCAmelCase : str =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') _UpperCAmelCase : Any =model.decode( decoder_input_ids[:, -1:] , snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case , decoder_position_ids=snake_case , ) _UpperCAmelCase : Union[str, Any] =model.decode(snake_case , snake_case , decoder_attention_mask=snake_case) _UpperCAmelCase : str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}") def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None , ): '''simple docstring''' if attention_mask is None: _UpperCAmelCase : List[str] =np.not_equal(__lowerCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _UpperCAmelCase : List[Any] =np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ): UpperCAmelCase =( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) UpperCAmelCase =(FlaxPegasusForConditionalGeneration,) if 
is_flax_available() else () UpperCAmelCase =True UpperCAmelCase =False UpperCAmelCase =False UpperCAmelCase =False def lowerCAmelCase ( self) -> int: '''simple docstring''' _UpperCAmelCase : Dict =FlaxPegasusModelTester(self) _UpperCAmelCase : int =ConfigTester(self , config_class=snake_case) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(snake_case , snake_case , snake_case) def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(snake_case , snake_case , snake_case) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _UpperCAmelCase : Optional[Any] =self._prepare_for_class(snake_case , snake_case) _UpperCAmelCase : Optional[int] =model_class(snake_case) @jax.jit def encode_jitted(snake_case , snake_case=None , **snake_case): return model.encode(input_ids=snake_case , attention_mask=snake_case) with self.subTest('JIT Enabled'): _UpperCAmelCase : List[Any] =encode_jitted(**snake_case).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): _UpperCAmelCase : List[Any] =encode_jitted(**snake_case).to_tuple() self.assertEqual(len(snake_case) , len(snake_case)) for jitted_output, output in zip(snake_case , snake_case): self.assertEqual(jitted_output.shape , output.shape) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _UpperCAmelCase : Tuple =model_class(snake_case) _UpperCAmelCase : Optional[int] =model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask']) _UpperCAmelCase : int ={ 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(snake_case , snake_case , snake_case): return model.decode( decoder_input_ids=snake_case , decoder_attention_mask=snake_case , encoder_outputs=snake_case , ) with self.subTest('JIT Enabled'): _UpperCAmelCase : Tuple =decode_jitted(**snake_case).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): _UpperCAmelCase : str =decode_jitted(**snake_case).to_tuple() self.assertEqual(len(snake_case) , len(snake_case)) for jitted_output, output in zip(snake_case , snake_case): self.assertEqual(jitted_output.shape , output.shape) @slow def lowerCAmelCase ( self) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCAmelCase : Optional[Any] =model_class_name.from_pretrained('google/pegasus-large' , from_pt=snake_case) _UpperCAmelCase : int =np.ones((1, 1)) _UpperCAmelCase : int =model(snake_case) self.assertIsNotNone(snake_case) @slow def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Optional[int] 
=FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum') _UpperCAmelCase : str =PegasusTokenizer.from_pretrained('google/pegasus-xsum') _UpperCAmelCase : Dict =[ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] _UpperCAmelCase : int =[ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] _UpperCAmelCase : int =tokenizer(snake_case , return_tensors='np' , truncation=snake_case , max_length=5_1_2 , padding=snake_case) _UpperCAmelCase : int =model.generate(**snake_case , num_beams=2).sequences _UpperCAmelCase : Optional[int] =tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case) assert tgt_text == decoded
242
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
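A behavior sketch for `download_prompt` above: a string containing any whitespace is treated as a literal prompt and returned unchanged, so this call needs no network access (the agent name is a placeholder):

prompt = download_prompt("Answer the question: <<task>>", agent_name="my-agent")
assert prompt == "Answer the question: <<task>>"  # returned verbatim, no Hub lookup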
242
1
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = """▁""" lowercase__ = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} lowercase__ = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } lowercase__ = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } lowercase__ = { """ernie-m-base""": 514, """ernie-m-large""": 514, } lowercase__ = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["input_ids"] lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = RESOURCE_FILES_NAMES def __init__( self , lowercase , lowercase=None , lowercase=False , lowercase="utf8" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase = None , **lowercase , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_lowerCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , vocab_file=lowercase , encoding=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) _lowerCamelCase : str = do_lower_case _lowerCamelCase : Optional[Any] = sentencepiece_model_ckpt _lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _lowerCamelCase : Dict = self.load_vocab(filepath=lowercase ) else: _lowerCamelCase : Optional[int] = {self.sp_model.id_to_piece(lowercase ): id for id in range(self.sp_model.get_piece_size() )} _lowerCamelCase : int = {v: k for k, v in self.vocab.items()} def A_ ( self , lowercase ): if text is None: return None _lowerCamelCase : Tuple = self.tokenize(lowercase ) _lowerCamelCase, _lowerCamelCase : Any = '', [] for i, ch in enumerate(lowercase ): if ch in self.SP_CHAR_MAPPING: _lowerCamelCase : List[str] = self.SP_CHAR_MAPPING.get(lowercase ) else: _lowerCamelCase : Dict = unicodedata.normalize('NFKC' , lowercase ) if self.is_whitespace(lowercase ): continue normalized_text += ch char_mapping.extend([i] * len(lowercase ) ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: _lowerCamelCase : List[str] = text.lower() for token in split_tokens: if token[:1] == "▁": _lowerCamelCase : List[str] = token[1:] _lowerCamelCase : Union[str, Any] = text[offset:].index(lowercase ) + offset _lowerCamelCase : Optional[int] = start + len(lowercase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) _lowerCamelCase : Optional[Any] = end return token_mapping @property def A_ ( self ): return len(self.vocab ) def A_ ( self ): return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): _lowerCamelCase : str = self.__dict__.copy() _lowerCamelCase : Optional[int] = None return state def __setstate__( self , lowercase ): _lowerCamelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCamelCase : List[str] = {} _lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def A_ ( self , lowercase ): return "".join((self.SP_CHAR_MAPPING.get(lowercase , lowercase ) for c in text) ) def A_ ( self , lowercase , lowercase=False , lowercase=64 , lowercase=0.1 ): if self.sp_model_kwargs.get('enable_sampling' ) is True: _lowerCamelCase : Optional[int] = True if self.sp_model_kwargs.get('alpha' ) is not None: _lowerCamelCase : Tuple = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: _lowerCamelCase : Tuple = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: _lowerCamelCase : Optional[Any] = self.sp_model.EncodeAsPieces(lowercase ) else: _lowerCamelCase : List[str] = self.sp_model.SampleEncodeAsPieces(lowercase , lowercase , lowercase ) _lowerCamelCase : Tuple = [] for pi, piece in enumerate(lowercase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(lowercase ) and pi != 0: new_pieces.append(lowercase ) continue else: continue _lowerCamelCase : Tuple = 0 for i, chunk in enumerate(lowercase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(lowercase ) or self.is_punct(lowercase ): 
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(lowercase ) _lowerCamelCase : int = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowerCamelCase : Any = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _lowerCamelCase : Tuple = i if len(lowercase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def A_ ( self , lowercase ): _lowerCamelCase : Optional[int] = ''.join(lowercase ).replace(lowercase , ' ' ).strip() return out_string def A_ ( self , lowercase ): _lowerCamelCase : Union[str, Any] = self.convert_ids_to_tokens(lowercase ) _lowerCamelCase : Any = ''.join(lowercase ).replace(lowercase , ' ' ).strip() return out_string def A_ ( self , lowercase ): return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) ) def A_ ( self , lowercase ): return self.reverse_vocab.get(lowercase , self.unk_token ) def A_ ( self , lowercase , lowercase=None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCamelCase : List[Any] = [self.cls_token_id] _lowerCamelCase : Optional[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def A_ ( self , lowercase , lowercase=None ): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def A_ ( self , lowercase , lowercase=None , lowercase=False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1] return [1] + ([0] * len(lowercase )) + [1] def A_ ( self , lowercase , lowercase = None ): # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(lowercase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(lowercase ) + 1) + [1] * (len(lowercase ) + 3) def A_ ( self , lowercase ): if "\u4e00" <= char <= "\u9fff": return True return False def A_ ( self , lowercase ): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def A_ ( self , lowercase ): if char in ",;:.?!~,;:。?!《》【】": return True return False def A_ ( self , lowercase ): if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(lowercase ) == 1: _lowerCamelCase : Tuple = unicodedata.category(lowercase ) if cat == "Zs": return True return False def A_ ( self , lowercase ): _lowerCamelCase : Tuple = {} with io.open(lowercase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(lowercase ): _lowerCamelCase : int = line.rstrip('\n' ) _lowerCamelCase : Optional[Any] = int(lowercase ) return token_to_idx def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : Union[str, Any] = 0 if os.path.isdir(lowercase ): _lowerCamelCase : List[Any] = os.path.join( lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: _lowerCamelCase : Optional[int] = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(lowercase , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowercase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ' Please check that the vocabulary is not corrupted!' ) _lowerCamelCase : Optional[Any] = token_index writer.write(token + '\n' ) index += 1 _lowerCamelCase : List[str] = os.path.join(lowercase , 'sentencepiece.bpe.model' ) with open(lowercase , 'wb' ) as fi: _lowerCamelCase : Dict = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (vocab_file,)
96
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """new-model""" if is_tf_available(): class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): _lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase ) 
self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow @require_tensorflow_probability def A_ ( self ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained( lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = copy.deepcopy(model.config ) _lowerCamelCase : Dict = ['FunnelBaseModel'] _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase ) 
self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): try: AutoConfig.register('new-model' , lowercase ) _lowerCamelCase : Tuple = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) auto_class.register(lowercase , lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config() _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() ) _lowerCamelCase : int = auto_class.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def A_ ( self ): with self.assertRaisesRegex( lowercase , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def A_ ( self ): with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def A_ ( self ): # Make sure we have cached the model. 
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
96
1
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() a : int = logging.get_logger(__name__) a : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } a : Tuple = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : int , _lowercase : List[Any] ) ->Tuple: '''simple docstring''' for attribute in key.split("." ): a : Optional[int] = getattr(_lowercase , _lowercase ) if weight_type is not None: a : Optional[Any] = getattr(_lowercase , _lowercase ).shape else: a : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": a : str = value elif weight_type == "weight_g": a : Union[str, Any] = value elif weight_type == "weight_v": a : Any = value elif weight_type == "bias": a : List[Any] = value else: a : Any = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Dict ) ->int: '''simple docstring''' a : Tuple = [] a : List[str] = fairseq_model.state_dict() a : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): a : Any = False if "conv_layers" in name: load_conv_layer( _lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , ) a : Tuple = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: a : Optional[int] = True if "*" in mapped_key: a : List[str] = name.split(_lowercase )[0].split("." 
)[-2] a : Any = mapped_key.replace("*" , _lowercase ) if "weight_g" in name: a : str = "weight_g" elif "weight_v" in name: a : int = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: a : Union[str, Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj a : int = "weight" else: a : Dict = None set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : str , _lowercase : List[str] , _lowercase : str ) ->Dict: '''simple docstring''' a : str = full_name.split("conv_layers." )[-1] a : int = name.split("." ) a : Tuple = int(items[0] ) a : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a : List[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) a : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a : Optional[int] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowercase ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : List[str] , _lowercase : Dict=None ) ->Optional[Any]: '''simple docstring''' a : Tuple = torch.load(_lowercase ) a : Dict = WavLMConfigOrig(checkpoint["cfg"] ) a : Union[str, Any] = WavLMOrig(_lowercase ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: a : Union[str, Any] = WavLMConfig.from_pretrained(_lowercase ) else: a : Dict = WavLMConfig() a : List[Any] = WavLMModel(_lowercase ) recursively_load_weights(_lowercase , _lowercase ) hf_wavlm.save_pretrained(_lowercase ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') a : Optional[Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
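A minimal sketch of driving the converter above from Python rather than the CLI, via the entry point invoked in the __main__ block. The paths are placeholders, and it assumes the unilm checkout and modules.py symlink described in the header comments are in place:

# Placeholder paths -- substitute a real fairseq WavLM checkpoint and an output folder.
fairseq_checkpoint = "/path/to/WavLM-Base.pt"
output_folder = "/path/to/wavlm-base-hf"

# Equivalent to passing --checkpoint_path and --pytorch_dump_folder_path on the CLI;
# a third argument (config_path) overrides the default WavLMConfig if given.
convert_wavlm_checkpoint(fairseq_checkpoint, output_folder)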
79
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def _SCREAMING_SNAKE_CASE ( ) ->List[str]: '''simple docstring''' a : Optional[Any] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase ) a : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=_lowercase ) env_command_parser(subparsers=_lowercase ) launch_command_parser(subparsers=_lowercase ) tpu_command_parser(subparsers=_lowercase ) test_command_parser(subparsers=_lowercase ) # Let's go a : int = parser.parse_args() if not hasattr(_lowercase , "func" ): parser.print_help() exit(1 ) # Run args.func(_lowercase ) if __name__ == "__main__": main()
79
1
'''simple docstring'''


def solution(n: int = 400_0000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(F'{solution() = }')
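A quick sanity check of solution: the Fibonacci terms up to 100 are 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, and the even ones sum to 2 + 8 + 34 = 44:

assert solution(10) == 10   # even terms <= 10: 2 + 8
assert solution(100) == 44  # even terms <= 100: 2 + 8 + 34
print(solution())           # sum of even Fibonacci terms not exceeding four million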
331
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class _lowerCamelCase : def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=50 , __a=0.02 , __a=True , __a=None , ) -> Union[str, Any]: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = initializer_range UpperCamelCase = use_labels UpperCamelCase = scope def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = self.get_config() return config, input_ids, input_mask, token_labels def snake_case_ (self ) -> List[str]: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__a , initializer_range=self.initializer_range , ) def snake_case_ (self ) -> List[str]: ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case_ (self , __a , __a , __a , __a , **__a , ) -> Dict: UpperCamelCase = BertGenerationEncoder(config=__a ) model.to(__a ) model.eval() UpperCamelCase = model(__a , attention_mask=__a ) UpperCamelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , **__a , ) -> str: UpperCamelCase = True UpperCamelCase = BertGenerationEncoder(config=__a ) model.to(__a ) model.eval() UpperCamelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) UpperCamelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , **__a , ) -> Optional[int]: UpperCamelCase = True UpperCamelCase = True UpperCamelCase = BertGenerationDecoder(config=__a ).to(__a ).eval() # first forward pass UpperCamelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0] UpperCamelCase = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) ) def snake_case_ (self , __a , __a , __a , __a , *__a , ) -> Optional[Any]: UpperCamelCase = BertGenerationDecoder(__a ) model.to(__a ) model.eval() UpperCamelCase = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ (self ) -> Dict: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): UpperCAmelCase_ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase_ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase_ = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def snake_case_ (self ) -> Any: UpperCamelCase = BertGenerationEncoderTester(self ) UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case_ (self ) -> Tuple: self.config_tester.run_common_tests() def snake_case_ (self ) -> List[str]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def snake_case_ (self ) -> Any: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs() UpperCamelCase = "bert" self.model_tester.create_and_check_model(__a , __a , __a , __a ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__a ) def snake_case_ (self ) -> List[str]: UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a ) def snake_case_ (self ) -> Union[str, Any]: # This regression test was failing with PyTorch < 
1.3 ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase = None self.model_tester.create_and_check_model_as_decoder( __a , __a , __a , __a , __a , __a , ) def snake_case_ (self ) -> str: UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__a ) @slow def snake_case_ (self ) -> List[str]: UpperCamelCase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(__a ) @require_torch class _lowerCamelCase ( unittest.TestCase ): @slow def snake_case_ (self ) -> int: UpperCamelCase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): UpperCamelCase = model(__a )[0] UpperCamelCase = torch.Size([1, 8, 10_24] ) self.assertEqual(output.shape , __a ) UpperCamelCase = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @require_torch class _lowerCamelCase ( unittest.TestCase ): @slow def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) UpperCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] ) with torch.no_grad(): UpperCamelCase = model(__a )[0] UpperCamelCase = torch.Size([1, 8, 5_03_58] ) self.assertEqual(output.shape , __a ) UpperCamelCase = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
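For reference, a minimal sketch of using the encoder checkpoint exercised by the integration tests above; the weights are large, so this is illustrative rather than something to run in CI, and it assumes sentencepiece is installed for the tokenizer:

import torch
from transformers import AutoTokenizer, BertGenerationEncoder

tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 1024])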
153
0
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) logging.set_verbosity_info() def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str ): if "xprophetnet" in prophetnet_checkpoint_path: __lowercase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_ ) __lowercase , __lowercase = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase_ , output_loading_info=lowerCamelCase_ ) else: __lowercase = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_ ) __lowercase , __lowercase = ProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase_ , output_loading_info=lowerCamelCase_ ) __lowercase = ['''key_proj''', '''value_proj''', '''query_proj'''] __lowercase = { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', '''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: __lowercase = key.split('''.''' ) if attributes[0] == "lm_head": __lowercase = prophet __lowercase = prophet_old else: __lowercase = prophet.prophetnet __lowercase = prophet_old.model __lowercase = False for attribute in attributes: if attribute in mapping: __lowercase = mapping[attribute] if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) > 0: __lowercase = attribute elif hasattr(lowerCamelCase_ , lowerCamelCase_ ): __lowercase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __lowercase = old_model.weight logger.info(f"{attribute} is initialized." ) __lowercase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__lowercase = old_model.bias logger.info(f"{attribute} is initialized" ) __lowercase = True break elif attribute in special_keys and hasattr(lowerCamelCase_ , '''in_proj_weight''' ): __lowercase = old_model.in_proj_weight.shape[0] // 3 __lowercase = getattr(lowerCamelCase_ , lowerCamelCase_ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __lowercase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __lowercase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __lowercase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __lowercase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __lowercase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __lowercase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __lowercase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." __lowercase = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) __lowercase = True break if attribute.isdigit(): __lowercase = model[int(lowerCamelCase_ )] __lowercase = old_model[int(lowerCamelCase_ )] else: __lowercase = getattr(lowerCamelCase_ , lowerCamelCase_ ) if old_attribute == "": __lowercase = old_model else: if not hasattr(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(f"{old_model} does not have {old_attribute}" ) __lowercase = getattr(lowerCamelCase_ , lowerCamelCase_ ) if not is_key_init: raise ValueError(f"{key} was not correctly initialized!" ) print(f"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
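As with the other conversion scripts, the entry point invoked in the __main__ block can be called directly; the paths below are placeholders:

# Placeholder paths -- substitute a real old-format ProphetNet checkpoint.
convert_prophetnet_checkpoint_to_pytorch(
    "/path/to/prophetnet_old",  # --prophetnet_checkpoint_path
    "/path/to/prophetnet-hf",   # --pytorch_dump_folder_path
)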
217
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' a : int = ["pixel_values"] def __init__(self ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = PILImageResampling.BILINEAR ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = True ,_lowerCamelCase = 1 / 255 ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> None: '''simple docstring''' super().__init__(**_lowerCamelCase ) __lowercase = size if size is not None else {'''shortest_edge''': 256} __lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase ) __lowercase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' ) __lowercase = do_resize __lowercase = size __lowercase = resample __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_rescale __lowercase = rescale_factor __lowercase = do_normalize __lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = PILImageResampling.BICUBIC ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray: '''simple docstring''' __lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __lowercase = get_resize_output_image_size(_lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=_lowerCamelCase ) return resize(_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray: '''simple docstring''' __lowercase = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}" ) return center_crop(_lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=_lowerCamelCase ,**_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ) -> np.ndarray: '''simple docstring''' return rescale(_lowerCamelCase ,scale=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> np.ndarray: '''simple docstring''' return normalize(_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ,data_format=_lowerCamelCase ,**_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = ChannelDimension.FIRST ,**_lowerCamelCase ,) -> Any: '''simple docstring''' __lowercase = do_resize if do_resize is not None else self.do_resize __lowercase = size if size is not None else self.size __lowercase = get_size_dict(_lowerCamelCase ,default_to_square=_lowerCamelCase ) __lowercase = resample if resample is not None else self.resample __lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase = crop_size if crop_size is not None else self.crop_size __lowercase = get_size_dict(_lowerCamelCase ,param_name='''crop_size''' ) __lowercase = do_rescale if do_rescale is not None else self.do_rescale __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = image_mean if image_mean is not None else self.image_mean __lowercase = image_std if image_std is not None else self.image_std __lowercase = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase = [to_numpy_array(_lowerCamelCase ) for image in images] if do_resize: __lowercase = [self.resize(image=_lowerCamelCase ,size=_lowerCamelCase ,resample=_lowerCamelCase ) for image in images] if do_center_crop: __lowercase = [self.center_crop(image=_lowerCamelCase ,size=_lowerCamelCase ) for image in images] if do_rescale: __lowercase = [self.rescale(image=_lowerCamelCase ,scale=_lowerCamelCase ) for image in images] if do_normalize: __lowercase = [self.normalize(image=_lowerCamelCase ,mean=_lowerCamelCase ,std=_lowerCamelCase ) for image in images] __lowercase = [to_channel_dimension_format(_lowerCamelCase ,_lowerCamelCase ) for image in images] __lowercase = {'''pixel_values''': images} return BatchFeature(data=_lowerCamelCase ,tensor_type=_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> str: '''simple docstring''' __lowercase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_lowerCamelCase ): __lowercase = target_sizes.numpy() __lowercase = [] for idx in range(len(_lowerCamelCase ) ): __lowercase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=_lowerCamelCase ) __lowercase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_lowerCamelCase ) else: __lowercase = logits.argmax(dim=1 ) __lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
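The preprocess flow implemented above is the classic shortest-edge-resize / center-crop / rescale / normalize pipeline. A minimal standalone sketch of the same steps, using the functional transforms the class imports and the IMAGENET_STANDARD statistics it defaults to, on a random HWC image:

import numpy as np
from transformers.image_transforms import center_crop, get_resize_output_image_size, normalize, rescale, resize

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

size = get_resize_output_image_size(image, size=256, default_to_square=False)  # shortest edge -> 256
image = resize(image, size=size)
image = center_crop(image, size=(224, 224))
image = rescale(image, scale=1 / 255)
image = normalize(image, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(image.shape)  # (224, 224, 3)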
217
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowercase__ ( unittest.TestCase): def __A ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE : Any = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on SCREAMING_SNAKE_CASE : List[str] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) SCREAMING_SNAKE_CASE : Optional[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] SCREAMING_SNAKE_CASE : Optional[Any] = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : str = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : Dict , **UpperCamelCase__ : List[Any] ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def __A ( self : Dict , **UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def __A ( self : Any , **UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def __A ( self : Optional[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE : int = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[int] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE : Any = self.get_image_processor() SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = 
CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : Dict = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ ) def __A ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def __A ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(UpperCamelCase__ , return_tensors='''np''' ) SCREAMING_SNAKE_CASE : str = processor(images=UpperCamelCase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.get_image_processor() SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = '''lower newer''' SCREAMING_SNAKE_CASE : List[Any] = processor(text=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer''' SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : int = processor(text=UpperCamelCase__ , 
images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def __A ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.get_image_processor() SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor() SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = '''lower newer''' SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs() SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
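Outside the test harness, the processor under test composes a tokenizer and an image processor behind a single call. A minimal usage sketch against the public CLIP checkpoint (downloads the processor config on first use):

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.fromarray(np.random.randint(0, 255, size=(224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']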
182
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowercase__ ( unittest.TestCase): def __A ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 def __A ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : Dict ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : str = Path(UpperCamelCase__ ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) ) SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Tuple = Path(UpperCamelCase__ ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : List[str] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Dict = CLIPConfig() # Create a dummy config file with image_proceesor_type SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict() config_dict.pop('''image_processor_type''' ) SCREAMING_SNAKE_CASE : str = CLIPImageProcessor(**UpperCamelCase__ ) # save in new folder model_config.save_pretrained(UpperCamelCase__ ) config.save_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE : List[Any] = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : Tuple ): '''simple docstring''' with 
tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Optional[int] = Path(UpperCamelCase__ ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , ) SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def __A ( self : int ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , '''clip-base is not a local folder and is not a valid model identifier''' ): SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''clip-base''' ) def __A ( self : List[str] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' ) def __A ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __A ( self : List[Any] ): '''simple docstring''' with self.assertRaises(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def __A ( self : Optional[Any] ): '''simple docstring''' try: AutoConfig.register('''custom''' , UpperCamelCase__ ) AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase__ ): AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCamelCase__ ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE : Any = Path(UpperCamelCase__ ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) ) SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(UpperCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __A ( self : Any ): '''simple docstring''' class lowercase__ ( UpperCamelCase_): UpperCamelCase_ = True try: AutoConfig.register('''custom''' , UpperCamelCase__ ) AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(UpperCamelCase__ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
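A condensed sketch of the register/round-trip flow the tests above exercise, with a made-up config/processor pair standing in for CustomConfig and CustomImageProcessor:

import tempfile

from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)

# Round-trip: save and reload through the auto-API.
with tempfile.TemporaryDirectory() as tmp_dir:
    MyImageProcessor().save_pretrained(tmp_dir)
    MyConfig().save_pretrained(tmp_dir)
    reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
    print(type(reloaded).__name__)  # MyImageProcessor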
182
1
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
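A quick shape check of the embedding helper above: for a batch of four timesteps and an eight-dimensional embedding, the result is (4, 8), half sines and half cosines:

import jax.numpy as jnp

timesteps = jnp.arange(4)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=8)
print(emb.shape)  # (4, 8)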
363
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A : '''simple docstring''' def __init__(self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Any=13 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Any=10 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=[1, 16, 4, 4] , _UpperCAmelCase : str=None , ) -> int: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size lowercase__ = (self.image_size // 32) ** 2 lowercase__ = num_patches + 1 def lowerCamelCase__ (self : Any ) -> Optional[int]: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase__ (self : int ) -> Tuple: """simple docstring""" lowercase__ = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , 
backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCAmelCase , ) def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = ViTHybridModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = ViTHybridForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ (self : int ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' A__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) A__ = False A__ = False A__ = False def lowerCamelCase__ (self : Any ) -> Any: """simple docstring""" lowercase__ = ViTHybridModelTester(self ) lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def lowerCamelCase__ (self : Dict ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase__ (self : List[str] ) -> Any: """simple docstring""" pass def lowerCamelCase__ (self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def lowerCamelCase__ (self : Dict ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(_UpperCAmelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowerCamelCase__ (self : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCamelCase__ (self : Tuple ) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) def lowerCamelCase__ (self : Dict ) -> Any: """simple docstring""" lowercase__ , lowercase__ = 
self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: lowercase__ = model_class(config=_UpperCAmelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": lowercase__ = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def lowerCamelCase__ (self : Dict ) -> List[Any]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ViTHybridModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def UpperCamelCase ( ) -> Optional[int]: """simple docstring""" lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase__ (self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _UpperCAmelCase ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): lowercase__ = model(**_UpperCAmelCase ) # verify the logits lowercase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) lowercase__ = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow @require_accelerate def lowerCamelCase__ (self : Any ) -> Optional[int]: """simple docstring""" lowercase__ = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) lowercase__ = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" ) lowercase__ = prepare_img() lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ) lowercase__ = model(**_UpperCAmelCase ) lowercase__ = outputs.logits # model predicts one of the 1000 ImageNet classes lowercase__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
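For reference, a minimal sketch of the classification flow the integration tests above verify, using the same public checkpoint and test fixture image; the expected top-1 label comes from the test's own assertion:

import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"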
146
0
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :list[list[str]] = [[] for _ in range(lowercase__ )] lowerCAmelCase_ :Optional[Any] = key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1 or len(lowercase__ ) <= key: return input_string for position, character in enumerate(lowercase__ ): lowerCAmelCase_ :List[str] = position % (lowest * 2) # puts it in bounds lowerCAmelCase_ :int = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(lowercase__ ) lowerCAmelCase_ :str = ["""""".join(lowercase__ ) for row in temp_grid] lowerCAmelCase_ :Any = """""".join(lowercase__ ) return output_string def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = [] lowerCAmelCase_ :List[Any] = key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1: return input_string lowerCAmelCase_ :list[list[str]] = [[] for _ in range(lowercase__ )] # generates template for position in range(len(lowercase__ ) ): lowerCAmelCase_ :Any = position % (lowest * 2) # puts it in bounds lowerCAmelCase_ :Dict = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("""*""" ) lowerCAmelCase_ :Tuple = 0 for row in temp_grid: # fills in the characters lowerCAmelCase_ :Dict = input_string[counter : counter + len(lowercase__ )] grid.append(list(lowercase__ ) ) counter += len(lowercase__ ) lowerCAmelCase_ :List[Any] = """""" # reads as zigzag for position in range(len(lowercase__ ) ): lowerCAmelCase_ :Tuple = position % (lowest * 2) # puts it in bounds lowerCAmelCase_ :str = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _snake_case ( lowercase__ : str ) -> dict[int, str]: '''simple docstring''' lowerCAmelCase_ :int = {} for key_guess in range(1 , len(lowercase__ ) ): # tries every key lowerCAmelCase_ :int = decrypt(lowercase__ , lowercase__ ) return results if __name__ == "__main__": import doctest doctest.testmod()
84
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any: """simple docstring""" lowercase : str = parent lowercase : Optional[Any] = batch_size lowercase : Union[str, Any] = seq_length lowercase : str = is_training lowercase : str = use_input_lengths lowercase : List[Any] = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Dict = sinusoidal_embeddings lowercase : Any = causal lowercase : str = asm lowercase : Optional[Any] = n_langs lowercase : Dict = vocab_size lowercase : Dict = n_special lowercase : List[Any] = hidden_size lowercase : str = num_hidden_layers lowercase : int = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : Optional[int] = type_sequence_label_size lowercase : List[str] = initializer_range lowercase : List[str] = num_labels lowercase : int = num_choices lowercase : int = summary_type lowercase : Tuple = use_proj lowercase : Union[str, Any] = scope lowercase : List[str] = bos_token_id def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : str = None if self.use_input_lengths: lowercase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Union[str, Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : Union[str, Any] = None lowercase : List[str] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __a ( self : Any ) -> List[Any]: 
"""simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]: """simple docstring""" lowercase : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , lengths=_A , langs=_A ) lowercase : Dict = model(_A , langs=_A ) lowercase : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]: """simple docstring""" lowercase : Dict = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Any = model(_A , start_positions=_A , end_positions=_A ) lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict: """simple docstring""" lowercase : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) lowercase : Tuple = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) lowercase : Optional[int] = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A ) ((lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) 
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int: """simple docstring""" lowercase : List[str] = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() lowercase : List[str] = model(_A ) lowercase : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict: """simple docstring""" lowercase : Optional[Any] = self.num_labels lowercase : Tuple = XLMForTokenClassification(_A ) model.to(_A ) model.eval() lowercase : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]: """simple docstring""" lowercase : int = self.num_choices lowercase : List[Any] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Dict = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCamelCase : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCamelCase : Tuple = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the 
slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) lowercase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def __a ( self : Any ) -> List[str]: """simple docstring""" lowercase : List[str] = XLMModelTester(self ) lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 ) def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def __a ( self : Any ) -> Dict: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def __a ( self : List[str] ) -> Optional[int]: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def __a ( self : Dict ) -> int: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token lowercase : List[Any] = min_length + idx + 1 lowercase : str = min_length + idx + 1 lowercase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str: """simple docstring""" self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token lowercase 
: Union[str, Any] = min_length + idx + 1 lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def __a ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Any = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class _A ( unittest.TestCase ): @slow def __a ( self : Any ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president lowercase : List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
308
0
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: take items in decreasing value/weight
    order, splitting the first item that no longer fits whole."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # running weight totals of the sorted items
    k = bisect(acc, w)  # number of items that fit whole
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
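
# A worked example for frac_knapsack above (numbers invented for this sketch):
# values [60, 100, 120], weights [10, 20, 30], capacity 50. The first two items
# fit whole and 20/30 of the third is taken, giving 160 + 80 = 240.
if __name__ == "__main__":
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240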
260
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Builds a GHZ-style circuit on `qubits` qubits and returns the
    measurement histogram from 1000 simulator shots."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
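
# Illustrative expectation for quantum_entanglement above: a GHZ-style circuit
# should only ever measure the all-zeros or all-ones bitstring, each in roughly
# half of the shots (exact counts vary from run to run; the qubit count below
# is chosen for this sketch).
if __name__ == "__main__":
    counts = quantum_entanglement(2)
    assert set(counts) <= {"00", "11"}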
260
1
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase : Dict = 16 lowercase : Dict = 32 def SCREAMING_SNAKE_CASE__ ( __A , __A = 16 ) -> str: _snake_case = AutoTokenizer.from_pretrained('bert-base-cased' ) _snake_case = load_dataset('glue' , 'mrpc' ) def tokenize_function(__A ): # max_length=None => use the model max length (it's actually the default) _snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__A , max_length=__A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case = datasets.map( __A , batched=__A , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__A ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case = 16 elif accelerator.mixed_precision != "no": _snake_case = 8 else: _snake_case = None return tokenizer.pad( __A , padding='longest' , max_length=__A , pad_to_multiple_of=__A , return_tensors='pt' , ) # Instantiate dataloaders. 
_snake_case = DataLoader( tokenized_datasets['train'] , shuffle=__A , collate_fn=__A , batch_size=__A ) _snake_case = DataLoader( tokenized_datasets['validation'] , shuffle=__A , collate_fn=__A , batch_size=__A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase : Any = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Any: # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , __A ) == "1": _snake_case = 2 # Initialize accelerator _snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case = config['lr'] _snake_case = int(config['num_epochs'] ) _snake_case = int(config['seed'] ) _snake_case = int(config['batch_size'] ) _snake_case = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation _snake_case = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _snake_case = batch_size // MAX_GPU_BATCH_SIZE _snake_case = MAX_GPU_BATCH_SIZE set_seed(__A ) _snake_case , _snake_case = get_dataloaders(__A , __A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case = model.to(accelerator.device ) # Instantiate optimizer _snake_case = AdamW(params=model.parameters() , lr=__A ) # Instantiate scheduler _snake_case = get_linear_schedule_with_warmup( optimizer=__A , num_warmup_steps=100 , num_training_steps=(len(__A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare( __A , __A , __A , __A , __A ) # Now we train the model for epoch in range(__A ): model.train() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _snake_case = model(**__A ) _snake_case = outputs.loss _snake_case = loss / gradient_accumulation_steps accelerator.backward(__A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() _snake_case = 0 for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _snake_case = model(**__A ) _snake_case = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(__A ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples _snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen] _snake_case = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=__A , references=__A , ) _snake_case = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , __A ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: _snake_case = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=__A , default=__A , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _snake_case = parser.parse_args() _snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__A , __A ) if __name__ == "__main__": main()
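
# Hedged usage note for the script above: it is normally started through the
# `accelerate` CLI (the filename below is a stand-in; the flags are exactly the
# argparse options defined in main(), and `accelerate launch` reads the machine
# setup previously saved by `accelerate config`):
#
#   accelerate launch <this_script>.py --mixed_precision fp16
#   python <this_script>.py --cpu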
42
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase : Tuple = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : int = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Tuple = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
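
# Hedged usage sketch (not part of the original file): with _LazyModule, the
# package import stays cheap, and submodules load on first attribute access.
if __name__ == "__main__":
    import transformers.models.xlm as xlm  # fast: nothing heavy imported yet

    print(xlm.XLMConfig().model_type)  # first access triggers configuration_xlm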
42
1
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowercase : List[str] = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase__ ( A : str , A : str , A : List[Any]=None ): '''simple docstring''' if rng is None: UpperCAmelCase = random.Random() UpperCAmelCase = 1 for dim in shape: total_dims *= dim UpperCAmelCase = [] for _ in range(A ): values.append(rng.randint(0 , vocab_size - 1 ) ) UpperCAmelCase = np.array(A , dtype=jnp.intaa ).reshape(A ) return output def lowerCamelCase__ ( A : int , A : Optional[int]=None ): '''simple docstring''' UpperCAmelCase = ids_tensor(A , vocab_size=2 , rng=A ) # make sure that at least one token is attended to for each batch UpperCAmelCase = 1 return attn_mask @require_flax class UpperCamelCase__: __magic_name__ : Optional[int] = None __magic_name__ : Optional[Any] = () def a__( self : str )-> Optional[Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 UpperCAmelCase = 2 UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2 UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length] UpperCAmelCase = jnp.ones_like(lowerCAmelCase ) UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens UpperCAmelCase = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` UpperCAmelCase = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def a__( self : Dict )-> Optional[int]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = False UpperCAmelCase = max_length UpperCAmelCase = 0 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase ) UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval() UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params ) UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def a__( self : Any )-> Optional[Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = False UpperCAmelCase = max_length for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) 
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Optional[Any] )-> int: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = True UpperCAmelCase = max_length for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : str )-> List[str]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = False UpperCAmelCase = max_length UpperCAmelCase = 2 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : List[Any] )-> List[str]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = False UpperCAmelCase = max_length UpperCAmelCase = 2 UpperCAmelCase = 2 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def a__( self : Tuple )-> List[str]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = True UpperCAmelCase = max_length UpperCAmelCase = 0.8 UpperCAmelCase = 10 UpperCAmelCase = 0.3 UpperCAmelCase = 1 UpperCAmelCase = 8 UpperCAmelCase = 9 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Optional[Any] )-> Optional[int]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = max_length UpperCAmelCase = 1 UpperCAmelCase = 8 UpperCAmelCase = 9 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Tuple )-> Tuple: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() UpperCAmelCase = max_length UpperCAmelCase = 2 UpperCAmelCase = 1 UpperCAmelCase = 8 
UpperCAmelCase = 9 for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Union[str, Any] )-> Any: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() # pad attention mask on the left UpperCAmelCase = attention_mask.at[(0, 0)].set(0 ) UpperCAmelCase = False UpperCAmelCase = max_length for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Optional[Any] )-> int: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() # pad attention mask on the left UpperCAmelCase = attention_mask.at[(0, 0)].set(0 ) UpperCAmelCase = True UpperCAmelCase = max_length for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def a__( self : Tuple )-> Union[str, Any]: """simple docstring""" UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config() # pad attention mask on the left UpperCAmelCase = attention_mask.at[(0, 0)].set(0 ) UpperCAmelCase = 2 UpperCAmelCase = max_length for model_class in self.all_generative_model_classes: UpperCAmelCase = model_class(lowerCAmelCase ) UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase ) UpperCAmelCase = jit(model.generate ) UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class UpperCamelCase__( unittest.TestCase ): def a__( self : Union[str, Any] )-> Optional[int]: """simple docstring""" UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' ) UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase = '''Hello world''' UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''np''' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(lowerCAmelCase , '''do_samples''' ): model.generate(lowerCAmelCase , do_samples=lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(lowerCAmelCase , '''foo''' ): UpperCAmelCase = {'''foo''': '''bar'''} 
model.generate(lowerCAmelCase , **lowerCAmelCase )
91
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
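
# Hedged usage sketch (the override below is chosen for illustration): the
# attribute_map lets generic names like `hidden_size` alias the GPT-specific
# `n_embd`, so both spellings resolve to the same value.
if __name__ == "__main__":
    config = OpenAIGPTConfig(n_layer=6)
    assert config.num_hidden_layers == 6  # resolved through attribute_map
    assert config.hidden_size == config.n_embd == 768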
91
1
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Tuple: '''simple docstring''' a__ : int = parent a__ : Any = batch_size a__ : Union[str, Any] = seq_length a__ : Dict = is_training a__ : Optional[Any] = use_input_mask a__ : str = use_token_type_ids a__ : Union[str, Any] = use_labels a__ : List[Any] = vocab_size a__ : str = hidden_size a__ : str = embedding_size a__ : Tuple = num_hidden_layers a__ : List[str] = num_attention_heads a__ : Optional[Any] = intermediate_size a__ : List[str] = hidden_act a__ : Optional[Any] = hidden_dropout_prob a__ : Optional[int] = attention_probs_dropout_prob a__ : Dict = max_position_embeddings a__ : Optional[Any] = type_vocab_size a__ : List[str] = type_sequence_label_size a__ : Tuple = initializer_range a__ : int = num_labels a__ : str = num_choices a__ : Optional[int] = scope def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__ : Dict = None if self.use_input_mask: a__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) a__ : Dict = None if self.use_token_type_ids: a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__ : Union[str, Any] = None a__ : Union[str, Any] = None a__ : str = None if self.use_labels: a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a__ : Tuple = ids_tensor([self.batch_size] , self.num_choices) a__ : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self) -> int: '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def __lowercase ( self , lowercase , 
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : Union[str, Any] = MegatronBertModel(config=lowercase) model.to(lowercase) model.eval() a__ : List[str] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase) a__ : Any = model(lowercase , token_type_ids=lowercase) a__ : List[str] = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : int = MegatronBertForMaskedLM(config=lowercase) model.to(lowercase) model.eval() a__ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__ : Optional[Any] = MegatronBertForCausalLM(config=lowercase) model.to(lowercase) model.eval() a__ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' a__ : Optional[int] = MegatronBertForNextSentencePrediction(config=lowercase) model.to(lowercase) model.eval() a__ : Any = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict: '''simple docstring''' a__ : Tuple = MegatronBertForPreTraining(config=lowercase) model.to(lowercase) model.eval() a__ : Optional[Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : List[str] = MegatronBertForQuestionAnswering(config=lowercase) model.to(lowercase) model.eval() a__ : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__ : Optional[Any] = self.num_labels a__ : Optional[int] = MegatronBertForSequenceClassification(lowercase) model.to(lowercase) model.eval() a__ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __lowercase ( self , lowercase , 
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : Any = self.num_labels a__ : Dict = MegatronBertForTokenClassification(config=lowercase) model.to(lowercase) model.eval() a__ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict: '''simple docstring''' a__ : Any = self.num_choices a__ : Any = MegatronBertForMultipleChoice(config=lowercase) model.to(lowercase) model.eval() a__ : Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : Any = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : List[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : Dict = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Optional[int] = config_and_inputs a__ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : str = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) __A : List[Any] = ( { '''feature-extraction''': MegatronBertModel, '''fill-mask''': MegatronBertForMaskedLM, '''question-answering''': MegatronBertForQuestionAnswering, '''text-classification''': MegatronBertForSequenceClassification, '''text-generation''': MegatronBertForCausalLM, '''token-classification''': MegatronBertForTokenClassification, '''zero-shot''': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) __A : List[Any] = True # test_resize_embeddings = False __A : Tuple = False def __lowercase ( self , lowercase , lowercase , lowercase=False) -> Optional[int]: '''simple docstring''' a__ : List[Any] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase) if return_labels: if model_class in get_values(lowercase): a__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase) a__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase) return inputs_dict def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : int = MegatronBertModelTester(self) a__ : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self) -> int: '''simple docstring''' a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowercase) def __lowercase ( self) -> int: '''simple docstring''' a__ : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase) def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase) def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase) def A_ ( A__ ) -> Any: return torch.tensor( A__ , dtype=torch.long , device=A__ , ) lowercase : Any = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.') def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : List[str] = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: a__ : Optional[int] = os.path.join(os.environ['MYDIR'] , lowercase) a__ : str = MegatronBertModel.from_pretrained(lowercase) model.to(lowercase) model.half() a__ : List[str] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]]) with torch.no_grad(): a__ : int = model(lowercase)[0] a__ : Optional[Any] = torch.Size((1, 9, 1024)) self.assertEqual(output.shape , lowercase) a__ : Tuple = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28] for ii in range(3): for jj in range(3): a__ : Optional[Any] = output[0, ii, jj] a__ : Tuple = expected[3 * ii + jj] a__ : Union[str, Any] = 'ii={} jj={} a={} b={}'.format(lowercase , lowercase , lowercase , lowercase) self.assertTrue(math.isclose(lowercase , lowercase , rel_tol=lowercase , abs_tol=lowercase) , msg=lowercase)
99
"""simple docstring""" import qiskit def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" A_ : Tuple = qiskit.Aer.get_backend('aer_simulator' ) A_ : str = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator A_ : Optional[Any] = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(_UpperCAmelCase ) if __name__ == "__main__": lowerCamelCase_ : List[str] = half_adder(1, 1) print(F"Half Adder Output Qubit Counts: {counts}")
286
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
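
# Illustrative counterexample for check_bipartite_dfs above (graph invented for
# this sketch): an odd cycle such as a triangle cannot be two-colored.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert check_bipartite_dfs(triangle) is False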
292
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")

        # setting an attribute on act1 must not leak onto the fresh act2
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
292
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough of the LRScheduler API
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
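
# Hedged usage sketch (the toy model, optimizer, and scheduler are invented for
# this example; in real training, `Accelerator.prepare` builds this wrapper for
# you and keeps `step_with_optimizer` enabled):
if __name__ == "__main__":
    import torch
    from accelerate.scheduler import AcceleratedScheduler

    linear = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(linear.parameters(), lr=0.1)
    base = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
    wrapped = AcceleratedScheduler(base, optimizer, step_with_optimizer=False)

    optimizer.step()
    wrapped.step()  # delegates straight to StepLR.step()
    print(wrapped.get_last_lr())  # [0.05]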
187
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
329
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""BeitFeatureExtractor"""] SCREAMING_SNAKE_CASE__ = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
350
def binary_count_setbits(a: int) -> int:
    """
    Return the number of 1s in the binary representation of a non-negative integer.
    """
    # Check the type first so that non-integer inputs fail with TypeError, not on the comparison below
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
297
0
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) supporting point updates and
    range-maximum queries over the half-open interval [left, right).

    >>> ft = MaxFenwickTree(5)
    >>> ft.query(0, 5)
    0
    >>> ft.update(4, 100)
    >>> ft.query(0, 5)
    100
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only this index, so it simply stores the new value
                self.tree[index] = value
            else:
                # Recompute the maximum over the node's range [current_left_border, index].
                # (Reconstructed: taking max with the old tree value would be wrong when
                # an update lowers a value, so the subrange is re-queried.)
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
119
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __UpperCAmelCase = None __UpperCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>''' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __UpperCAmelCase = [ np.dtype('''|b1'''), np.dtype('''|u1'''), np.dtype('''<u2'''), np.dtype('''>u2'''), np.dtype('''<i2'''), np.dtype('''>i2'''), np.dtype('''<u4'''), np.dtype('''>u4'''), np.dtype('''<i4'''), np.dtype('''>i4'''), np.dtype('''<f4'''), np.dtype('''>f4'''), np.dtype('''<f8'''), np.dtype('''>f8'''), ] @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : bool = True UpperCAmelCase__ : Optional[str] = None # Automatically constructed UpperCAmelCase__ : ClassVar[str] = "PIL.Image.Image" UpperCAmelCase__ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) UpperCAmelCase__ : str = field(default="Image" , init=a__ , repr=a__ ) def __call__( self ) -> Any: return self.pa_type def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): return {"path": value, "bytes": None} elif isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): return {"path": None, "bytes": value} elif isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(SCREAMING_SNAKE_CASE_ ) elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> "PIL.Image.Image": if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support decoding images, please install \'Pillow\'.' 
) if token_per_repo_id is None: UpperCamelCase : Any = {} UpperCamelCase , UpperCamelCase : Union[str, Any] = value['path'], value['bytes'] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : int = PIL.Image.open(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : int = path.split('::' )[-1] try: UpperCamelCase : Optional[Any] = string_to_dict(SCREAMING_SNAKE_CASE_, config.HUB_DATASETS_URL )['repo_id'] UpperCamelCase : str = token_per_repo_id.get(SCREAMING_SNAKE_CASE_ ) except ValueError: UpperCamelCase : Tuple = None with xopen(SCREAMING_SNAKE_CASE_, 'rb', use_auth_token=SCREAMING_SNAKE_CASE_ ) as f: UpperCamelCase : Optional[int] = BytesIO(f.read() ) UpperCamelCase : int = PIL.Image.open(bytes_ ) else: UpperCamelCase : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def snake_case_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value('binary' ), "path": Value('string' ), } ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> pa.StructArray: if pa.types.is_string(storage.type ): UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.binary() ) UpperCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCamelCase : Optional[int] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() ) UpperCamelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: UpperCamelCase : List[str] = storage.field('bytes' ) else: UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: UpperCamelCase : List[str] = storage.field('path' ) else: UpperCamelCase : Optional[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() ) UpperCamelCase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null() ) elif pa.types.is_list(storage.type ): UpperCamelCase : Optional[Any] = pa.array( [encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) UpperCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ), type=pa.string() ) UpperCamelCase : int = pa.StructArray.from_arrays( [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null() ) return array_cast(SCREAMING_SNAKE_CASE_, self.pa_type ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(SCREAMING_SNAKE_CASE_ ): with xopen(SCREAMING_SNAKE_CASE_, 'rb' ) as f: UpperCamelCase : Optional[int] = f.read() return bytes_ UpperCamelCase : Union[str, Any] = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) UpperCamelCase : Any = pa.array( [os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('path' ).to_pylist()], type=pa.string(), ) UpperCamelCase : int = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], 
mask=bytes_array.is_null() ) return array_cast(SCREAMING_SNAKE_CASE_, self.pa_type ) def UpperCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCamelCase : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> bytes: UpperCamelCase : Any = BytesIO() if image.format in list_image_compression_formats(): UpperCamelCase : Tuple = image.format else: UpperCamelCase : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' image.save(snake_case__ , format=snake_case__ ) return buffer.getvalue() def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> dict: if hasattr(snake_case__ , 'filename' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(snake_case__ )} def UpperCamelCase ( snake_case__ : np.ndarray ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) UpperCamelCase : Union[str, Any] = array.dtype UpperCamelCase : List[Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER UpperCamelCase : Optional[Any] = dtype.kind UpperCamelCase : Any = dtype.itemsize UpperCamelCase : int = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCamelCase : Optional[Any] = np.dtype('|u1' ) if dtype_kind not in ["u", "i"]: raise TypeError( F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCamelCase : List[Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCamelCase : Dict = dtype_byteorder + dtype_kind + str(snake_case__ ) UpperCamelCase : str = np.dtype(snake_case__ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) UpperCamelCase : Union[str, Any] = PIL.Image.fromarray(array.astype(snake_case__ ) ) return {"path": None, "bytes": image_to_bytes(snake_case__ )} def UpperCamelCase ( snake_case__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) if objs: UpperCamelCase , UpperCamelCase : Union[str, Any] = first_non_null_value(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(snake_case__ , np.ndarray ): UpperCamelCase : List[Any] = no_op_if_value_is_null(snake_case__ ) return [obj_to_image_dict_func(snake_case__ ) for obj in objs] elif isinstance(snake_case__ , PIL.Image.Image ): UpperCamelCase : Optional[int] = no_op_if_value_is_null(snake_case__ ) return [obj_to_image_dict_func(snake_case__ ) for obj in objs] else: return objs else: return objs
119
1
"""simple docstring""" from math import factorial UpperCAmelCase: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 60 , __UpperCAmelCase = 1000000 ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length _lowercase : str = 0 # the cached sizes of the previous chains _lowercase : dict[int, int] = {} for start_chain_element in range(1 , __UpperCAmelCase ): # The temporary set will contain the elements of the chain _lowercase : Optional[int] = set() _lowercase : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. _lowercase : Optional[int] = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__UpperCAmelCase ) chain_set_length += 1 _lowercase : Optional[Any] = digit_factorial_sum(__UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] _lowercase : List[str] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
336
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
336
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _a = logging.get_logger(__name__) if is_vision_available(): import PIL class A_ ( snake_case__ ): _lowercase : Optional[int] = ['pixel_values'] def __init__( self : int , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : bool = True , **UpperCAmelCase : Any , ) -> None: super().__init__(**UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = size if size is not None else {'shortest_edge': 2_2_4} __lowerCAmelCase: Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __lowerCAmelCase: Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} __lowerCAmelCase: Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase , param_name='crop_size' ) __lowerCAmelCase: Tuple = do_resize __lowerCAmelCase: int = size __lowerCAmelCase: Union[str, Any] = resample __lowerCAmelCase: Optional[int] = do_center_crop __lowerCAmelCase: Dict = crop_size __lowerCAmelCase: Dict = do_rescale __lowerCAmelCase: List[str] = rescale_factor __lowerCAmelCase: Tuple = do_normalize __lowerCAmelCase: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCAmelCase: int = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCAmelCase: str = do_convert_rgb def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray: __lowerCAmelCase: List[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __lowerCAmelCase: Optional[Any] = get_resize_output_image_size(UpperCAmelCase , size=size['shortest_edge'] , default_to_square=UpperCAmelCase ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any , ) -> np.ndarray: __lowerCAmelCase: Tuple = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCAmelCase , size=(size['height'], size['width']) , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> str: return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Union[str, Any] , ) -> np.ndarray: return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : int = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase : int , ) -> PIL.Image.Image: __lowerCAmelCase: Union[str, Any] = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase: List[str] = size if size is not None else self.size __lowerCAmelCase: Dict = get_size_dict(UpperCAmelCase , param_name='size' , default_to_square=UpperCAmelCase ) __lowerCAmelCase: Union[str, Any] = resample if resample is not None else self.resample __lowerCAmelCase: Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCAmelCase: Dict = crop_size if crop_size is not None else self.crop_size __lowerCAmelCase: str = get_size_dict(UpperCAmelCase , param_name='crop_size' , default_to_square=UpperCAmelCase ) __lowerCAmelCase: Any = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase: Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCAmelCase: List[str] = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase: Tuple = image_mean if image_mean is not None else self.image_mean __lowerCAmelCase: List[str] = image_std if image_std is not None else self.image_std __lowerCAmelCase: List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCAmelCase: List[Any] = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' 
) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCAmelCase: List[str] = [convert_to_rgb(UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. __lowerCAmelCase: Tuple = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: __lowerCAmelCase: Optional[int] = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: __lowerCAmelCase: int = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: __lowerCAmelCase: Dict = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: __lowerCAmelCase: Any = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] __lowerCAmelCase: Tuple = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] __lowerCAmelCase: List[str] = {'pixel_values': images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
322
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A_ ( snake_case__ ): _lowercase : int = (DPMSolverSinglestepScheduler,) _lowercase : Optional[Any] = (('num_inference_steps', 2_5),) def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]: __lowerCAmelCase: Union[str, Any] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**UpperCAmelCase ) return config def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any: __lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs ) __lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase ) __lowerCAmelCase: int = self.dummy_sample __lowerCAmelCase: Union[str, Any] = 0.1 * sample __lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase ) __lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase ) new_scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample __lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : str ) -> str: pass def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple: __lowerCAmelCase: Tuple = dict(self.forward_default_kwargs ) __lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase ) __lowerCAmelCase: Tuple = self.dummy_sample __lowerCAmelCase: Union[str, Any] = 0.1 * sample __lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: Dict = self.get_scheduler_config() __lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase ) __lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residual (must be after setting 
timesteps) __lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample __lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]: if scheduler is None: __lowerCAmelCase: str = self.scheduler_classes[0] __lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: List[Any] = self.scheduler_classes[0] __lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: List[Any] = 1_0 __lowerCAmelCase: Dict = self.dummy_model() __lowerCAmelCase: Dict = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample return sample def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: __lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: Any = 5_0 __lowerCAmelCase: int = self.dummy_model() __lowerCAmelCase: List[str] = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): __lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample __lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def UpperCAmelCase ( self : Optional[int] ) -> Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: # make sure that iterating over schedulers with same config names gives same results # for defaults __lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 __lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase ) __lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : List[str] ) -> List[str]: self.check_over_configs(thresholding=UpperCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , 
sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , ) def UpperCAmelCase ( self : Any ) -> Union[str, Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> str: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , ) __lowerCAmelCase: Dict = self.full_loop( solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , ) assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers" def UpperCAmelCase ( self : Optional[Any] ) -> str: self.check_over_configs(lower_order_final=UpperCAmelCase ) self.check_over_configs(lower_order_final=UpperCAmelCase ) def UpperCAmelCase ( self : str ) -> Any: self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def UpperCAmelCase ( self : List[Any] ) -> str: self.check_over_configs(variance_type=UpperCAmelCase ) self.check_over_configs(variance_type='learned_range' ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 ) def UpperCAmelCase ( self : Any ) -> int: __lowerCAmelCase: Any = self.full_loop() __lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : Any ) -> Union[str, Any]: __lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase ) __lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def UpperCAmelCase ( self : Dict ) -> Optional[Any]: __lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' ) __lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def UpperCAmelCase ( self : str ) -> List[str]: __lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase ) __lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __lowerCAmelCase: Any = self.scheduler_classes[0] __lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 ) __lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: Optional[int] = 1_0 __lowerCAmelCase: Union[str, Any] = self.dummy_model() __lowerCAmelCase: int = self.dummy_sample_deter.half() scheduler.set_timesteps(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample assert sample.dtype == torch.floataa
322
1
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) class UpperCamelCase : def __init__( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = False def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' if not self.initialized: lowercase_ : Optional[Any] = RagRetriever( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,) lowercase_ : Dict = True def _UpperCAmelCase ( self ) -> int: '''simple docstring''' self.retriever.index.init_index() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : Any = self.retriever._main_retrieve(__UpperCamelCase ,__UpperCamelCase ) return doc_ids, retrieved_doc_embeds class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> Optional[int]: '''simple docstring''' if index is not None and index.is_initialized() and len(__UpperCamelCase ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,) lowercase_ : List[Any] = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) for worker in self.retrieval_workers ] ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
lowercase_ : Any = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] lowercase_ , lowercase_ : Optional[int] = ray.get(random_worker.retrieve.remote(__UpperCamelCase ,__UpperCamelCase ) ) else: lowercase_ , lowercase_ : int = self._main_retrieve(__UpperCamelCase ,__UpperCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCamelCase ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return super(__UpperCamelCase ,cls ).get_tokenizers(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Tuple = kwargs.pop('config' ,__UpperCamelCase ) or RagConfig.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[Any] = RagTokenizer.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase ) lowercase_ : str = rag_tokenizer.question_encoder lowercase_ : Dict = rag_tokenizer.generator if indexed_dataset is not None: lowercase_ : Union[str, Any] = 'custom' lowercase_ : List[Any] = CustomHFIndex(config.retrieval_vector_size ,__UpperCamelCase ) else: lowercase_ : Union[str, Any] = cls._build_index(__UpperCamelCase ) return cls( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,retrieval_workers=__UpperCamelCase ,index=__UpperCamelCase ,)
321
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def A_ ( A__ ) -> Any: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self , lowercase , lowercase) -> Optional[int]: '''simple docstring''' super().__init__() a__ : int = module a__ : Dict = nn.Sequential( nn.Linear(module.in_features , lowercase , bias=lowercase) , nn.Linear(lowercase , module.out_features , bias=lowercase) , ) a__ : Dict = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowercase) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def __lowercase ( self , lowercase , *lowercase , **lowercase) -> Any: '''simple docstring''' return self.module(lowercase , *lowercase , **lowercase) + self.adapter(lowercase) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" __A : Tuple = '''bigscience/bloom-1b7''' # Constant values __A : str = 2.109_6595_5269_2574 __A : List[Any] = '''Hello my name is''' __A : List[str] = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) __A : List[Any] = 1_0 def __lowercase ( self) -> str: '''simple docstring''' a__ : Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name) class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> int: '''simple docstring''' super().setUp() # Models and tokenizer a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto') a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto') def __lowercase ( self) -> Any: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Dict = self.model_abit.config self.assertTrue(hasattr(lowercase , 'quantization_config')) a__ : str = config.to_dict() a__ : Dict = config.to_diff_dict() a__ : Dict = config.to_json_string() def __lowercase ( self) -> Optional[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit a__ : Optional[Any] = self.model_fpaa.get_memory_footprint() a__ : Any = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE) a__ : Optional[int] = get_some_linear_layer(self.model_abit) self.assertTrue(linear.weight.__class__ == Paramsabit) def __lowercase ( self) -> str: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowercase , torch.nn.Linear): if name not in 
["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta) def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : List[str] = self.tokenizer(self.input_text , return_tensors='pt') a__ : List[str] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase) , self.EXPECTED_OUTPUTS) def __lowercase ( self) -> int: '''simple docstring''' a__ : Dict = BitsAndBytesConfig() a__ : Union[str, Any] = True a__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowercase , device_map='auto') a__ : Any = self.tokenizer(self.input_text , return_tensors='pt') a__ : List[str] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase) , self.EXPECTED_OUTPUTS) def __lowercase ( self) -> Any: '''simple docstring''' with self.assertRaises(lowercase), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowercase) def __lowercase ( self) -> str: '''simple docstring''' a__ : Any = BitsAndBytesConfig() with self.assertRaises(lowercase): a__ : Any = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowercase , load_in_abit=lowercase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __lowercase ( self) -> Optional[Any]: '''simple docstring''' with self.assertRaises(lowercase): # Tries with `str` self.model_abit.to('cpu') with self.assertRaises(lowercase): # Tries with a `dtype`` self.model_abit.to(torch.floataa) with self.assertRaises(lowercase): # Tries with a `device` self.model_abit.to(torch.device('cuda:0')) with self.assertRaises(lowercase): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowercase): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt') a__ : Optional[Any] = self.model_fpaa.to(torch.floataa) a__ : int = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to('cpu') # Check this does not throw an error a__ : List[str] = self.model_fpaa.half() # Check this does not throw an error a__ : Any = self.model_fpaa.float() def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=lowercase , device_map='auto') self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def __lowercase ( cls) -> List[str]: '''simple docstring''' a__ : Union[str, Any] = 't5-small' a__ : str = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense a__ : Dict = AutoTokenizer.from_pretrained(cls.model_name) a__ : List[str] = 'Translate in German: Hello, my dog is cute' def __lowercase ( self) -> List[Any]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __lowercase ( self) -> List[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration a__ : Tuple = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Any = 
None # test with `t5-small` a__ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto') a__ : Any = self.tokenizer(self.input_text , return_tensors='pt').to(0) a__ : Optional[Any] = model.generate(**lowercase) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowercase , device_map='auto') a__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt').to(0) a__ : Union[str, Any] = model.generate(**lowercase) a__ : Dict = modules def __lowercase ( self) -> List[str]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto') # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit)) a__ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt').to(0) a__ : Union[str, Any] = model.generate(**lowercase) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowercase , device_map='auto') a__ : str = self.tokenizer(self.input_text , return_tensors='pt').to(0) a__ : Any = model.generate(**lowercase) class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> Tuple: '''simple docstring''' super().setUp() # model_name a__ : List[Any] = 'bigscience/bloom-560m' a__ : str = 't5-small' # Different types of model a__ : Optional[int] = AutoModel.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto') # Sequence classification model a__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowercase , device_map='auto') # CausalLM model a__ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto') # Seq2seq model a__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowercase , device_map='auto') def __lowercase ( self) -> List[Any]: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __lowercase ( self) -> List[str]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' super().setUp() def __lowercase ( self) -> Optional[int]: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __lowercase ( self) -> str: '''simple docstring''' a__ : Optional[Any] = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : int = self.pipe(self.input_text) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS) 
@require_torch_multi_gpu class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' super().setUp() def __lowercase ( self) -> int: '''simple docstring''' a__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowercase , device_map='balanced') # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1}) # Check that inference pass works on the model a__ : Tuple = self.tokenizer(self.input_text , return_tensors='pt') # Second real batch a__ : Optional[Any] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowercase) , self.EXPECTED_OUTPUTS) class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : Optional[int] = 'facebook/opt-350m' super().setUp() def __lowercase ( self) -> List[Any]: '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'): return # Step 1: freeze all parameters a__ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase) self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()}) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability a__ : List[Any] = param.data.to(torch.floataa) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowercase)): a__ : Optional[int] = LoRALayer(module.q_proj , rank=16) a__ : Optional[Any] = LoRALayer(module.k_proj , rank=16) a__ : Optional[Any] = LoRALayer(module.v_proj , rank=16) # Step 3: dummy batch a__ : Optional[int] = self.tokenizer('Test batch ' , return_tensors='pt').to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : int = model.forward(**lowercase) out.logits.norm().backward() for module in model.modules(): if isinstance(lowercase , lowercase): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(lowercase , nn.Embedding): self.assertTrue(module.weight.grad is None) class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Tuple = '''gpt2-xl''' __A : Dict = 3.3191_8548_5415_2187
99
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
99
1
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCAmelCase__ :Dict = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __a ( lowerCAmelCase__ ): def __init__( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) requires_backends(self , 'torch' ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) self.check_model_type(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} _UpperCAmelCase = {} # preprocess args if "points_per_batch" in kwargs: _UpperCAmelCase = kwargs['points_per_batch'] if "points_per_crop" in kwargs: _UpperCAmelCase = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: _UpperCAmelCase = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: _UpperCAmelCase = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: _UpperCAmelCase = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: _UpperCAmelCase = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: _UpperCAmelCase = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: _UpperCAmelCase = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: _UpperCAmelCase = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: _UpperCAmelCase = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: _UpperCAmelCase = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: _UpperCAmelCase = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 512 / 1500 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 1 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processor.size['longest_edge'] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.image_processor.generate_crop_boxes( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) with self.device_placement(): if self.framework == "pt": _UpperCAmelCase = self.get_inference_context() with inference_context(): _UpperCAmelCase = self._ensure_tensor_on_device(_SCREAMING_SNAKE_CASE , device=self.device ) _UpperCAmelCase = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) ) _UpperCAmelCase = image_embeddings _UpperCAmelCase = grid_points.shape[1] _UpperCAmelCase = 
points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. ' 'To return all points at once, set points_per_batch to None' ) for i in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = grid_points[:, i : i + points_per_batch, :, :] _UpperCAmelCase = input_labels[:, i : i + points_per_batch] _UpperCAmelCase = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.88 , _SCREAMING_SNAKE_CASE=0.95 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , ) -> Dict: """simple docstring""" _UpperCAmelCase = model_inputs.pop('input_boxes' ) _UpperCAmelCase = model_inputs.pop('is_last' ) _UpperCAmelCase = model_inputs.pop('original_sizes' ).tolist() _UpperCAmelCase = model_inputs.pop('reshaped_input_sizes' ).tolist() _UpperCAmelCase = self.model(**_SCREAMING_SNAKE_CASE ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks _UpperCAmelCase = model_outputs['pred_masks'] _UpperCAmelCase = self.image_processor.post_process_masks( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , binarize=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model_outputs['iou_scores'] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.7 , ) -> int: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores' ) ) all_masks.extend(model_output.pop('masks' ) ) all_boxes.append(model_output.pop('boxes' ) ) _UpperCAmelCase = torch.cat(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.cat(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.image_processor.post_process_for_mask_generation( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = defaultdict(_SCREAMING_SNAKE_CASE ) for output in model_outputs: for k, v in output.items(): extra[k].append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = {} if output_rle_mask: _UpperCAmelCase = rle_mask if output_bboxes_mask: _UpperCAmelCase = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
355
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Return (start index, end index, sum) of the maximum-sum subarray of
    ``arr[low:high + 1]`` using divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
185
0
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowercase : Dict = ["""bert-base-uncased""", """bert-base-cased"""] lowercase : Optional[Any] = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class A__ ( tf.keras.Model ): """simple docstring""" def __init__( self , lowercase) -> Dict: '''simple docstring''' super().__init__() a__ : Tuple = tokenizer a__ : int = AutoConfig.from_pretrained(lowercase) a__ : Dict = TFAutoModel.from_config(lowercase) def __lowercase ( self , lowercase) -> Tuple: '''simple docstring''' a__ : List[Any] = self.tokenizer(lowercase) a__ : Dict = self.bert(**lowercase) return out["pooler_output"] @require_tf @require_tensorflow_text class A__ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self) -> List[Any]: '''simple docstring''' super().setUp() a__ : int = [ BertTokenizer.from_pretrained(lowercase) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false a__ : Union[str, Any] = [TFBertTokenizer.from_pretrained(lowercase) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowercase , use_fast_bert_tokenizer=lowercase) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) a__ : Tuple = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] a__ : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1])) def __lowercase ( self) -> Dict: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): a__ : Tuple = tokenizer(lowercase , return_tensors='tf' , padding='longest') a__ : Any = tf_tokenizer(lowercase) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def __lowercase ( self) -> Optional[Any]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: a__ : Dict = tf_tokenizer(self.paired_sentences) a__ : Tuple = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def __lowercase ( self) -> int: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: a__ : Tuple = tf.function(lowercase) for test_inputs in (self.test_sentences, self.paired_sentences): a__ : Optional[int] = tf.constant(lowercase) a__ : Optional[Any] = compiled_tokenizer(lowercase) a__ : Union[str, Any] = tf_tokenizer(lowercase) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def 
__lowercase ( self) -> str: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: a__ : str = ModelToSave(tokenizer=lowercase) a__ : str = tf.convert_to_tensor(self.test_sentences) a__ : List[Any] = model(lowercase) # Build model with some sample inputs with TemporaryDirectory() as tempdir: a__ : Optional[int] = Path(lowercase) / 'saved.model' model.save(lowercase) a__ : Any = tf.keras.models.load_model(lowercase) a__ : List[Any] = loaded_model(lowercase) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
99
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
99
1
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def a ( __a , __a ) -> int: '''simple docstring''' UpperCamelCase__ :Optional[Any] = checkpoint UpperCamelCase__ :Optional[int] = {} UpperCamelCase__ :Any = vae_state_dict['''encoder.conv_in.weight'''] UpperCamelCase__ :Union[str, Any] = vae_state_dict['''encoder.conv_in.bias'''] UpperCamelCase__ :List[Any] = vae_state_dict['''encoder.conv_out.weight'''] UpperCamelCase__ :List[str] = vae_state_dict['''encoder.conv_out.bias'''] UpperCamelCase__ :Optional[Any] = vae_state_dict['''encoder.norm_out.weight'''] UpperCamelCase__ :Any = vae_state_dict['''encoder.norm_out.bias'''] UpperCamelCase__ :Any = vae_state_dict['''decoder.conv_in.weight'''] UpperCamelCase__ :List[Any] = vae_state_dict['''decoder.conv_in.bias'''] UpperCamelCase__ :Union[str, Any] = vae_state_dict['''decoder.conv_out.weight'''] UpperCamelCase__ :Union[str, Any] = vae_state_dict['''decoder.conv_out.bias'''] UpperCamelCase__ :Any = vae_state_dict['''decoder.norm_out.weight'''] UpperCamelCase__ :List[str] = vae_state_dict['''decoder.norm_out.bias'''] UpperCamelCase__ :Tuple = vae_state_dict['''quant_conv.weight'''] UpperCamelCase__ :Optional[int] = vae_state_dict['''quant_conv.bias'''] UpperCamelCase__ :List[str] = vae_state_dict['''post_quant_conv.weight'''] UpperCamelCase__ :int = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only UpperCamelCase__ :List[Any] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) UpperCamelCase__ :int = { layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only UpperCamelCase__ :Tuple = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) UpperCamelCase__ :Optional[Any] = { layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__a ) } for i in range(__a ): UpperCamelCase__ :Union[str, Any] = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key] if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCamelCase__ :Dict = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.weight''' ) UpperCamelCase__ :str = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.bias''' ) UpperCamelCase__ :Optional[Any] = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :Optional[Any] = [key for key in vae_state_dict if '''encoder.mid.block''' in key] UpperCamelCase__ :Dict = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ :Dict = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key] UpperCamelCase__ :Optional[Any] = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Dict = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :str = [key for key in vae_state_dict if 
'''encoder.mid.attn''' in key] UpperCamelCase__ :Union[str, Any] = renew_vae_attention_paths(__a ) UpperCamelCase__ :int = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): UpperCamelCase__ :int = num_up_blocks - 1 - i UpperCamelCase__ :int = [ key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key ] if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCamelCase__ :Tuple = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCamelCase__ :Dict = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCamelCase__ :Union[str, Any] = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Union[str, Any] = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :List[str] = [key for key in vae_state_dict if '''decoder.mid.block''' in key] UpperCamelCase__ :Tuple = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ :int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key] UpperCamelCase__ :int = renew_vae_resnet_paths(__a ) UpperCamelCase__ :str = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :Any = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] UpperCamelCase__ :Any = renew_vae_attention_paths(__a ) UpperCamelCase__ :List[Any] = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def a ( __a , __a , ) -> List[str]: '''simple docstring''' UpperCamelCase__ :Optional[int] = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) UpperCamelCase__ :Union[str, Any] = io.BytesIO(r.content ) UpperCamelCase__ :Any = OmegaConf.load(__a ) UpperCamelCase__ :List[Any] = 512 UpperCamelCase__ :int = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open UpperCamelCase__ :Union[str, Any] = {} with safe_open(__a , framework='''pt''' , device='''cpu''' ) as f: for key in f.keys(): UpperCamelCase__ :List[Any] = f.get_tensor(__a ) else: UpperCamelCase__ :List[str] = torch.load(__a , map_location=__a )['''state_dict'''] # Convert the VAE model. UpperCamelCase__ :str = create_vae_diffusers_config(__a , image_size=__a ) UpperCamelCase__ :Union[str, Any] = custom_convert_ldm_vae_checkpoint(__a , __a ) UpperCamelCase__ :Dict = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') __snake_case = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
350
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
219
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : int ) -> bool: """simple docstring""" snake_case : List[str] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
203
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _lowerCAmelCase ( snake_case_ ): def lowerCamelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' if tokenize_kwargs is None: snake_case : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" ) snake_case : List[str] = truncation snake_case : Union[str, Any] = tokenize_kwargs snake_case : List[Any] = {} if return_tensors is not None: snake_case : Tuple = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict[str, GenericTensor]: '''simple docstring''' snake_case : List[Any] = self.framework snake_case : str = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' snake_case : int = self.model(**UpperCamelCase__ ) return model_outputs def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
203
1
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : def __init__( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int=1_3 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : List[Any]=2_4 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Union[str, Any]=6 , UpperCamelCase__ : Dict=3_7 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=1_0_0_0 , )-> int: '''simple docstring''' __lowerCAmelCase: Any = parent __lowerCAmelCase: Any = batch_size __lowerCAmelCase: List[Any] = seq_length __lowerCAmelCase: Optional[Any] = is_training __lowerCAmelCase: List[Any] = use_input_mask __lowerCAmelCase: List[Any] = use_token_type_ids __lowerCAmelCase: Union[str, Any] = use_labels __lowerCAmelCase: Optional[int] = vocab_size __lowerCAmelCase: Union[str, Any] = hidden_size __lowerCAmelCase: Optional[Any] = num_hidden_layers __lowerCAmelCase: Optional[int] = num_attention_heads __lowerCAmelCase: List[Any] = intermediate_size __lowerCAmelCase: Dict = hidden_act __lowerCAmelCase: List[Any] = hidden_dropout_prob __lowerCAmelCase: Union[str, Any] = attention_probs_dropout_prob __lowerCAmelCase: str = max_position_embeddings __lowerCAmelCase: List[str] = type_vocab_size __lowerCAmelCase: Optional[Any] = type_sequence_label_size __lowerCAmelCase: str = initializer_range __lowerCAmelCase: int = num_labels __lowerCAmelCase: Union[str, Any] = scope __lowerCAmelCase: Any = range_bbox def lowercase_ ( self : Dict)-> List[str]: '''simple docstring''' __lowerCAmelCase: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCAmelCase: List[str] = bbox[i, j, 3] __lowerCAmelCase: Union[str, Any] = bbox[i, j, 1] __lowerCAmelCase: int = t if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCAmelCase: Dict = bbox[i, j, 2] __lowerCAmelCase: List[str] = bbox[i, j, 0] __lowerCAmelCase: Optional[int] = t __lowerCAmelCase: Optional[int] = None if self.use_input_mask: __lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) __lowerCAmelCase: List[Any] = None if self.use_token_type_ids: __lowerCAmelCase: int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __lowerCAmelCase: str = None __lowerCAmelCase: Union[str, Any] = None if self.use_labels: __lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , 
self.type_sequence_label_size) __lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __lowerCAmelCase: Dict = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Dict)-> List[Any]: '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowercase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , )-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = LiltModel(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Dict = model(UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__) __lowerCAmelCase: List[Any] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , token_type_ids=UpperCamelCase__) __lowerCAmelCase: Tuple = model(UpperCamelCase__ , bbox=UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , )-> Dict: '''simple docstring''' __lowerCAmelCase: List[Any] = self.num_labels __lowerCAmelCase: Tuple = LiltForTokenClassification(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: str = model( UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowercase_ ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Any , )-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = LiltForQuestionAnswering(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: List[str] = model( UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowercase_ ( self : Optional[Any])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: str = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): Optional[int] = 
config_and_inputs __lowerCAmelCase: List[Any] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class snake_case ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : Tuple = False def lowercase_ ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int)-> Dict: '''simple docstring''' return True def lowercase_ ( self : List[Any])-> Dict: '''simple docstring''' __lowerCAmelCase: Dict = LiltModelTester(self) __lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7) def lowercase_ ( self : Tuple)-> Any: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : List[Any])-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__) def lowercase_ ( self : str)-> int: '''simple docstring''' __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase: List[Any] = type self.model_tester.create_and_check_model(*UpperCamelCase__) def lowercase_ ( self : str)-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__) def lowercase_ ( self : Tuple)-> int: '''simple docstring''' __lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__) @slow def lowercase_ ( self : Dict)-> List[str]: '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase: Any = LiltModel.from_pretrained(UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__) @require_torch @slow class snake_case ( unittest.TestCase ): def lowercase_ ( self : Dict)-> Any: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(UpperCamelCase__) __lowerCAmelCase: Optional[int] = torch.tensor([[1, 2]] , device=UpperCamelCase__) __lowerCAmelCase: int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCamelCase__) # forward pass with torch.no_grad(): __lowerCAmelCase: str = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__) __lowerCAmelCase: Optional[int] = torch.Size([1, 2, 7_6_8]) __lowerCAmelCase: Optional[int] = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCamelCase__ , ) self.assertTrue(outputs.last_hidden_state.shape , UpperCamelCase__) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCamelCase__ , atol=1e-3))
108
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __A = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
108
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class __A ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = WavaVecaPhonemeCTCTokenizer lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' super().setUp() lowerCamelCase__ = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) lowerCamelCase__ = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2_0 , __lowerCAmelCase=5 ): '''simple docstring''' lowerCamelCase__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )) for i in range(len(__lowerCAmelCase ) )] lowerCamelCase__ = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowerCAmelCase ) , __lowerCAmelCase ) ) if max_length is not None and len(__lowerCAmelCase ) > max_length: lowerCamelCase__ = toks[:max_length] if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0: while len(__lowerCAmelCase ) < min_length: lowerCamelCase__ = toks + toks # toks_str = [t[1] for t in toks] lowerCamelCase__ = [t[0] for t in toks] # Ensure consistency lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) if " " not in output_txt and len(__lowerCAmelCase ) > 1: lowerCamelCase__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , 
clean_up_tokenization_spaces=__lowerCAmelCase ) ) if with_prefix_space: lowerCamelCase__ = ''' ''' + output_txt lowerCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) return output_txt, output_ids def __lowerCamelCase ( self , **__lowerCAmelCase ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) lowerCamelCase__ = tokenizer('''m xxx ɪ''' , do_phonemize=__lowerCAmelCase ).input_ids self.assertEqual(__lowerCAmelCase , [1_3, 3_9_2, 1_7] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) lowerCamelCase__ = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowerCAmelCase ).input_ids self.assertEqual(__lowerCAmelCase , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa lowerCamelCase__ = tokenizer('''maɪ c''' , do_phonemize=__lowerCAmelCase ).input_ids self.assertEqual(__lowerCAmelCase , [3, 2_0_0] ) # mai should be <unk> (=3) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(__lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) lowerCamelCase__ = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) lowerCamelCase__ = [ [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8], [2_4, 2_2, 5, 2_4, 2_2, 5, 7_7], ] lowerCamelCase__ = tokenizer.decode(sample_ids[0] ) lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , batch_tokens[0] ) self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(__lowerCAmelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , 
word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off lowerCamelCase__ = [ [1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8], [tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7], ] # fmt: on # decode with word_del_token filter lowerCamelCase__ = tokenizer.decode(sample_ids[0] ) lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , batch_tokens[0] ) self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter lowerCamelCase__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , batch_tokens[0] ) self.assertEqual(__lowerCAmelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) lowerCamelCase__ = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' ) lowerCamelCase__ = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__lowerCAmelCase ) lowerCamelCase__ = '''Hello how are you''' lowerCamelCase__ = tokenizer(__lowerCAmelCase , phonemizer_lang='''en-us''' ).input_ids lowerCamelCase__ = tokenizer(__lowerCAmelCase , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(__lowerCAmelCase , '''ɛ l o h aʊ a ʁ j u''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) lowerCamelCase__ = '''Hello how Are you''' lowerCamelCase__ = '''hello how are you''' 
lowerCamelCase__ = tokenizer(__lowerCAmelCase ).input_ids lowerCamelCase__ = tokenizer(__lowerCAmelCase ).input_ids self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off lowerCamelCase__ = [ [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4], [2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4], ] # fmt: on lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def __lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = [d[key] for d in offsets] return retrieved_list def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" lowerCamelCase__ = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8] # fmt: on lowerCamelCase__ = tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(__lowerCAmelCase , __lowerCAmelCase ): self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) ) self.assertTrue(isinstance(outputs_list[0] , __lowerCAmelCase ) ) # transform list to ModelOutput lowerCamelCase__ = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(__lowerCAmelCase , __lowerCAmelCase ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): [recursive_check(__lowerCAmelCase , __lowerCAmelCase ) for la, la in zip(__lowerCAmelCase , __lowerCAmelCase )] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) if "char_offsets" in outputs_batch: 
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off lowerCamelCase__ = [ [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4], [2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase ) lowerCamelCase__ = [tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase ) for ids in sample_ids] check_list_tuples_equal(__lowerCAmelCase , __lowerCAmelCase ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): lowerCamelCase__ = tokenizer.vocab_size lowerCamelCase__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) lowerCamelCase__ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] lowerCamelCase__ = tokenizer.add_tokens(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.vocab_size lowerCamelCase__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) ) lowerCamelCase__ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowerCAmelCase ) self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) lowerCamelCase__ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} lowerCamelCase__ = tokenizer.add_special_tokens(__lowerCAmelCase ) lowerCamelCase__ = tokenizer.vocab_size lowerCamelCase__ = len(__lowerCAmelCase ) self.assertNotEqual(__lowerCAmelCase , 0 ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) ) self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) ) lowerCamelCase__ = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowerCAmelCase ) self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) 
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): lowerCamelCase__ = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] lowerCamelCase__ = tokenizer.convert_tokens_to_string(__lowerCAmelCase ) self.assertIsInstance(output['''text'''] , __lowerCAmelCase )
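# Editor's sketch, separate from the test file above: the batch_decode checks
# there depend on CTC-style decoding, which collapses repeated ids and then
# drops the pad (blank) id before mapping ids to phonemes. A minimal,
# self-contained illustration of that collapse rule (names are illustrative):
from itertools import groupby


def ctc_collapse(ids: list, pad_id: int = 0) -> list:
    """Collapse consecutive duplicate ids, then remove the pad/blank id."""
    deduped = [key for key, _ in groupby(ids)]
    return [i for i in deduped if i != pad_id]


# A pad between two equal ids keeps both, exactly as in the offsets test above.
assert ctc_collapse([11, 5, 5, 5, 0, 5, 8, 8], pad_id=0) == [11, 5, 5, 8]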
209
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = "▁" _a = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } _a = { "vocab_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", }, "spm_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", }, "tokenizer_config_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", }, } _a = { "facebook/m2m100_418M": 1_024, } # fmt: off _a = { "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"] } class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = ["""input_ids""", """attention_mask"""] lowerCAmelCase_ = [] lowerCAmelCase_ = [] def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="m2m100" , __lowerCAmelCase = None , __lowerCAmelCase=8 , **__lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs lowerCamelCase__ = language_codes lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES[language_codes] lowerCamelCase__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} lowerCamelCase__ = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) lowerCamelCase__ = vocab_file lowerCamelCase__ = load_json(__lowerCAmelCase ) lowerCamelCase__ = {v: k for k, v in self.encoder.items()} lowerCamelCase__ = spm_file 
lowerCamelCase__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) lowerCamelCase__ = len(self.encoder ) lowerCamelCase__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } lowerCamelCase__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} lowerCamelCase__ = {v: k for k, v in self.lang_token_to_id.items()} lowerCamelCase__ = src_lang if src_lang is not None else '''en''' lowerCamelCase__ = tgt_lang lowerCamelCase__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) lowerCamelCase__ = num_madeup_words @property def __lowerCamelCase ( self ): '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = [] lowerCamelCase__ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token lowerCamelCase__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) lowerCamelCase__ = [1] * len(self.prefix_tokens ) lowerCamelCase__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCamelCase__ = {} lowerCamelCase__ = 
load_spm(self.spm_file , self.sp_model_kwargs ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' lowerCamelCase__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory' ) lowerCamelCase__ = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) lowerCamelCase__ = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = "en" , __lowerCAmelCase = None , __lowerCAmelCase = "ro" , **__lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = src_lang lowerCamelCase__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowerCamelCase__ = src_lang lowerCamelCase__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) lowerCamelCase__ = self.get_lang_id(__lowerCAmelCase ) lowerCamelCase__ = tgt_lang_id return inputs def __lowerCamelCase ( self ): '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def __lowerCamelCase ( self ): '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase ) lowerCamelCase__ = self.lang_token_to_id[lang_token] lowerCamelCase__ = [self.cur_lang_id] lowerCamelCase__ = [self.eos_token_id] def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase ) lowerCamelCase__ = self.lang_token_to_id[lang_token] lowerCamelCase__ = [self.cur_lang_id] lowerCamelCase__ = [self.eos_token_id] def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return self.lang_code_to_token[lang] def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def lowerCAmelCase__(__snake_case ,__snake_case ) -> sentencepiece.SentencePieceProcessor: '''simple docstring''' lowerCamelCase__ = sentencepiece.SentencePieceProcessor(**__snake_case ) spm.Load(str(__snake_case ) ) return spm def lowerCAmelCase__(__snake_case ) -> Union[Dict, List]: '''simple docstring''' with open(__snake_case ,'''r''' ) as f: return json.load(__snake_case ) def lowerCAmelCase__(__snake_case ,__snake_case ) -> None: '''simple docstring''' with open(__snake_case ,'''w''' ) as f: json.dump(__snake_case ,__snake_case ,indent=2 )
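# Editor's sketch (hedged, standalone): the tokenizer above frames a source
# sequence as [__lang__] + ids + [eos] via its special-token setters. The
# wrapping itself reduces to one line; the function name and example ids below
# are illustrative, not part of the library API.
def wrap_with_lang(token_ids: list, lang_id: int, eos_id: int) -> list:
    """Prepend the language-token id and append EOS."""
    return [lang_id] + list(token_ids) + [eos_id]


assert wrap_with_lang([7, 8, 9], lang_id=1000, eos_id=2) == [1000, 7, 8, 9, 2]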
209
1
"""simple docstring""" import enum import shutil import sys lowercase__ : List[str] = shutil.get_terminal_size() lowercase__ : Dict = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""} class UpperCamelCase__ ( enum.Enum ): """simple docstring""" _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 1 def UpperCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple="" ) -> Dict: """simple docstring""" sys.stdout.write(str(__snake_case ) + end ) sys.stdout.flush() def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict="" ) -> Optional[Any]: """simple docstring""" forceWrite(f"\u001b[{color}m{content}\u001b[0m" , __snake_case ) def UpperCamelCase_ ( ) -> int: """simple docstring""" forceWrite('\r' ) def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Tuple: """simple docstring""" forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" ) def UpperCamelCase_ ( ) -> Any: """simple docstring""" forceWrite(' ' * TERMINAL_WIDTH ) reset_cursor() def UpperCamelCase_ ( ) -> int: """simple docstring""" reset_cursor() forceWrite('-' * TERMINAL_WIDTH )
356
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : int=3_2 , SCREAMING_SNAKE_CASE_ : Dict=5 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : List[Any]="last" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : str=None , ): lowerCAmelCase_ : Tuple = parent lowerCAmelCase_ : Tuple = batch_size lowerCAmelCase_ : str = seq_length lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[int] = use_input_lengths lowerCAmelCase_ : Union[str, Any] = use_token_type_ids lowerCAmelCase_ : str = use_labels lowerCAmelCase_ : str = gelu_activation lowerCAmelCase_ : str = sinusoidal_embeddings lowerCAmelCase_ : List[Any] = causal lowerCAmelCase_ : Union[str, Any] = asm lowerCAmelCase_ : Union[str, Any] = n_langs lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Any = n_special lowerCAmelCase_ : Union[str, Any] = hidden_size lowerCAmelCase_ : str = num_hidden_layers lowerCAmelCase_ : Tuple = num_attention_heads lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob lowerCAmelCase_ : Dict = max_position_embeddings lowerCAmelCase_ : Union[str, Any] = type_vocab_size lowerCAmelCase_ : List[Any] = type_sequence_label_size lowerCAmelCase_ : str = initializer_range lowerCAmelCase_ : Dict = num_labels lowerCAmelCase_ : Union[str, Any] = num_choices lowerCAmelCase_ : Union[str, Any] = summary_type lowerCAmelCase_ : Optional[Any] = use_proj lowerCAmelCase_ : List[Any] = scope def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : Dict = None if self.use_input_lengths: lowerCAmelCase_ : Any = ( ids_tensor([self.batch_size] , vocab_size=2 ) + 
self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase_ : Any = None if self.use_token_type_ids: lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase_ : Dict = None lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : List[str] = None if self.use_labels: lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ : Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , ): lowerCAmelCase_ : Union[str, Any] = FlaubertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , lengths=SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , ): lowerCAmelCase_ : Any = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , ): lowerCAmelCase_ : Tuple = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() 
lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ): lowerCAmelCase_ : Optional[int] = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Any = model( SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , p_mask=SCREAMING_SNAKE_CASE_ , ) lowerCAmelCase_ : Optional[int] = model( SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , ) ((lowerCAmelCase_) ,) : int = result_with_labels.to_tuple() lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) ((lowerCAmelCase_) ,) : Dict = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , ): lowerCAmelCase_ : Optional[int] = FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ): lowerCAmelCase_ : List[Any] = self.num_labels lowerCAmelCase_ : Optional[Any] = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() 
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , ): lowerCAmelCase_ : Dict = self.num_choices lowerCAmelCase_ : Optional[Any] = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ : Optional[Any] = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) ,( lowerCAmelCase_ ) , ) : Dict = config_and_inputs lowerCAmelCase_ : List[str] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ): lowerCAmelCase_ : Any = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCAmelCase_ : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : Tuple = FlaubertModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , emb_dim=3_7 ) def SCREAMING_SNAKE_CASE__ ( self : Any ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE_ ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Optional[Any] = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowerCAmelCase_ ,lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCAmelCase_ : int = True lowerCAmelCase_ : Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : int = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , 'traced_model.pt' ) ) lowerCAmelCase_ : Optional[Any] = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'traced_model.pt' ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict['input_ids'].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['attention_mask'].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : int = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' ) lowerCAmelCase_ : List[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )[0] lowerCAmelCase_ : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
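# Editor's sketch: the torchscript test above traces a model, saves the trace,
# reloads it, and calls the loaded module. The same round trip on a toy module,
# standalone (assumes torch is installed):
import os
import tempfile

import torch


class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2


traced = torch.jit.trace(Toy(), (torch.ones(2),))  # record the graph for one example input
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")  # deserialize onto the CPU
assert torch.equal(loaded(torch.ones(2)), torch.full((2,), 2.0))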
289
0
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class lowercase__ : _UpperCAmelCase :Tuple = None def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : int =self.feature_extraction_class(**self.feat_extract_dict ) lowerCamelCase_ : int =json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , snake_case__ ) def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : List[str] =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ : Optional[int] =os.path.join(snake_case__ , "feat_extract.json" ) feat_extract_first.to_json_file(snake_case__ ) lowerCamelCase_ : Optional[Any] =self.feature_extraction_class.from_json_file(snake_case__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ : List[str] =feat_extract_first.save_pretrained(snake_case__ )[0] check_json_file_has_correct_format(snake_case__ ) lowerCamelCase_ : str =self.feature_extraction_class.from_pretrained(snake_case__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : Tuple =self.feature_extraction_class() self.assertIsNotNone(snake_case__ )
144
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Dict = logging.get_logger(__name__) A__ : Union[str, Any] = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[str] = "canine" def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=768 , snake_case__ : Tuple=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=1_6384 , snake_case__ : str=16 , snake_case__ : Tuple=0.02 , snake_case__ : Dict=1E-12 , snake_case__ : Any=0 , snake_case__ : Optional[int]=0xe_000 , snake_case__ : List[str]=0xe_001 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : List[Any]=8 , snake_case__ : List[str]=1_6384 , snake_case__ : Union[str, Any]=128 , **snake_case__ : Tuple , ): super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) lowerCamelCase_ : Tuple =max_position_embeddings lowerCamelCase_ : Optional[int] =hidden_size lowerCamelCase_ : Tuple =num_hidden_layers lowerCamelCase_ : Dict =num_attention_heads lowerCamelCase_ : str =intermediate_size lowerCamelCase_ : Dict =hidden_act lowerCamelCase_ : List[Any] =hidden_dropout_prob lowerCamelCase_ : Union[str, Any] =attention_probs_dropout_prob lowerCamelCase_ : Dict =initializer_range lowerCamelCase_ : Tuple =type_vocab_size lowerCamelCase_ : Optional[Any] =layer_norm_eps # Character config: lowerCamelCase_ : List[str] =downsampling_rate lowerCamelCase_ : List[Any] =upsampling_kernel_size lowerCamelCase_ : Any =num_hash_functions lowerCamelCase_ : Optional[int] =num_hash_buckets lowerCamelCase_ : Union[str, Any] =local_transformer_stride
144
1
def actual_power(a: int, b: int) -> int:
    """Recursive exponentiation by squaring; assumes b >= 0."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half power once, not twice
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Handle negative exponents via 1 / a**(-b)."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
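# Editor's sketch: a standalone iterative cross-check of the recursive version
# above -- exponentiation by squaring in a loop, also O(log |b|) multiplications.
def power_iterative(a: float, b: int) -> float:
    if b < 0:
        return 1 / power_iterative(a, -b)
    result = 1.0
    while b:
        if b & 1:  # odd exponent: fold one factor of the base into the result
            result *= a
        a *= a  # square the base
        b >>= 1  # halve the exponent
    return result


assert power_iterative(2, 10) == 1024
assert power_iterative(-2, -3) == -0.125  # 1 / (-2)**3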
355
'''simple docstring''' # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
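# Editor's sketch: the shim above delegates to diffusers' own `deprecate`
# helper. The underlying mechanism is plain warning emission; a minimal,
# hedged stand-in (not the diffusers implementation) looks like this:
import warnings


def deprecate_import(old_path: str, removed_in: str, message: str) -> None:
    warnings.warn(
        f"{old_path} is deprecated and will be removed in {removed_in}. {message}",
        FutureWarning,
        stacklevel=3,  # attribute the warning to the importing module, not this helper
    )


deprecate_import(
    "diffusers.pipeline_utils",
    "0.22.0",
    "Import from diffusers.pipelines.pipeline_utils instead.",
)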
91
0
"""Sum of the digits of n! (Project Euler problem 20 uses n = 100)."""


def factorial(num: int) -> int:
    """Return num!, computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Sum the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # remove the last digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the digit sum of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
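# Editor's note: standalone spot checks of the digit-sum pipeline above.
# 10! = 3628800, whose digits sum to 27; for n = 100 the sum is 648.
from math import factorial as _factorial

assert sum(int(d) for d in str(_factorial(10))) == 27
assert sum(int(d) for d in str(_factorial(100))) == 648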
251
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Estimate the largest eigenvalue of `input_matrix` and its eigenvector."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know the vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check that our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
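# Editor's sketch: the core loop of power iteration on a small symmetric
# matrix, checked against numpy's dense eigensolver (standalone; assumes numpy).
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])
v = np.array([1.0, 0.0])
for _ in range(200):  # repeated multiply-and-normalize drives v to the top eigenvector
    v = A @ v
    v /= np.linalg.norm(v)
lam = v @ A @ v  # Rayleigh quotient of the (normalized) converged vector

assert abs(lam - np.linalg.eigh(A)[0][-1]) < 1e-8  # eigh sorts ascending; [-1] is the largest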
43
0
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class a__ : def __init__( self ): """simple docstring""" _lowercase : list[Any] = [] _lowercase : int = 0 _lowercase : int = 0 def _lowerCamelCase ( self ): """simple docstring""" return self.head == self.tail def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" self.data.append(_UpperCamelCase ) _lowercase : Optional[int] = self.tail + 1 def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Union[str, Any] = self.data[self.head] _lowercase : Tuple = self.head + 1 return ret def _lowerCamelCase ( self ): """simple docstring""" return self.tail - self.head def _lowerCamelCase ( self ): """simple docstring""" print(self.data ) print("**************" ) print(self.data[self.head : self.tail] ) class a__ : def __init__( self , _UpperCamelCase ): """simple docstring""" _lowercase : Dict = data _lowercase : MyNode | None = None _lowercase : MyNode | None = None _lowercase : int = 1 def _lowerCamelCase ( self ): """simple docstring""" return self.data def _lowerCamelCase ( self ): """simple docstring""" return self.left def _lowerCamelCase ( self ): """simple docstring""" return self.right def _lowerCamelCase ( self ): """simple docstring""" return self.height def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Optional[Any] = data def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Union[str, Any] = node def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : Dict = node def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" _lowercase : List[str] = height def _A ( snake_case ) -> int: if node is None: return 0 return node.get_height() def _A ( snake_case , snake_case ) -> int: if a > b: return a return b def _A ( snake_case ) -> MyNode: print("left rotation node:" , node.get_data() ) _lowercase : str = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(snake_case ) _lowercase : Dict = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) _lowercase : Dict = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(snake_case ) return ret def _A ( snake_case ) -> MyNode: print("right rotation node:" , node.get_data() ) _lowercase : Any = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(snake_case ) _lowercase : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) _lowercase : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(snake_case ) return ret def _A ( snake_case ) -> MyNode: _lowercase : Optional[Any] = node.get_left() assert left_child is not None node.set_left(left_rotation(snake_case ) ) return right_rotation(snake_case ) def _A ( snake_case ) -> MyNode: _lowercase : List[str] = node.get_right() assert right_child is not None node.set_right(right_rotation(snake_case ) ) return left_rotation(snake_case ) def _A ( snake_case , snake_case ) -> MyNode | None: if node is None: return MyNode(snake_case ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , snake_case ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected _lowercase : Dict = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left 
child of the left child _lowercase : Optional[int] = right_rotation(snake_case ) else: _lowercase : Union[str, Any] = lr_rotation(snake_case ) else: node.set_right(insert_node(node.get_right() , snake_case ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: _lowercase : int = node.get_right() assert right_child is not None if data < right_child.get_data(): _lowercase : Union[str, Any] = rl_rotation(snake_case ) else: _lowercase : Any = left_rotation(snake_case ) _lowercase : Any = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(snake_case ) return node def _A ( snake_case ) -> Any: while True: _lowercase : Tuple = root.get_right() if right_child is None: break _lowercase : Tuple = right_child return root.get_data() def _A ( snake_case ) -> Any: while True: _lowercase : List[str] = root.get_left() if left_child is None: break _lowercase : int = left_child return root.get_data() def _A ( snake_case , snake_case ) -> MyNode | None: _lowercase : Dict = root.get_left() _lowercase : Any = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: _lowercase : Tuple = get_left_most(snake_case ) root.set_data(snake_case ) root.set_right(del_node(snake_case , snake_case ) ) elif left_child is not None: _lowercase : Optional[Any] = left_child elif right_child is not None: _lowercase : Union[str, Any] = right_child else: return None elif root.get_data() > data: if left_child is None: print("No such data" ) return root else: root.set_left(del_node(snake_case , snake_case ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(snake_case , snake_case ) ) if get_height(snake_case ) - get_height(snake_case ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): _lowercase : Dict = left_rotation(snake_case ) else: _lowercase : Optional[Any] = rl_rotation(snake_case ) elif get_height(snake_case ) - get_height(snake_case ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): _lowercase : Optional[Any] = right_rotation(snake_case ) else: _lowercase : Optional[Any] = lr_rotation(snake_case ) _lowercase : List[str] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(snake_case ) return root class a__ : def __init__( self ): """simple docstring""" _lowercase : MyNode | None = None def _lowerCamelCase ( self ): """simple docstring""" return get_height(self.root ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" print("insert:" + str(_UpperCamelCase ) ) _lowercase : str = insert_node(self.root , _UpperCamelCase ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" print("delete:" + str(_UpperCamelCase ) ) if self.root is None: print("Tree is empty!" 
) return _lowercase : Tuple = del_node(self.root , _UpperCamelCase ) def __str__( self , ): # a level traversal, gives a more intuitive look on the tree """simple docstring""" _lowercase : str = "" _lowercase : Optional[Any] = MyQueue() q.push(self.root ) _lowercase : Tuple = self.get_height() if layer == 0: return output _lowercase : Optional[Any] = 0 while not q.is_empty(): _lowercase : List[Any] = q.pop() _lowercase : str = " " * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(_UpperCamelCase ) q.push(_UpperCamelCase ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space _lowercase : Optional[int] = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , _UpperCamelCase ) - 1: _lowercase : List[Any] = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _A ( ) -> None: import doctest doctest.testmod() if __name__ == "__main__": _test() _snake_case = AVLtree() _snake_case = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
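A minimal, self-contained sketch of the single-rotation step the insertion path above relies on for a left-left imbalance; Node, height, and rotate_right are illustrative names introduced here, not part of the implementation above.

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def height(node):
    # Height of an empty subtree is 0, matching get_height above.
    return 0 if node is None else 1 + max(height(node.left), height(node.right))

def rotate_right(node):
    # Promote the left child; the old root adopts the pivot's right subtree.
    pivot = node.left
    node.left = pivot.right
    pivot.right = node
    return pivot

# A left-left chain 3 -> 2 -> 1 has height 3; one rotation restores balance.
root = Node(3)
root.left = Node(2)
root.left.left = Node(1)
root = rotate_right(root)
assert (root.data, root.left.data, root.right.data) == (2, 1, 3)
assert height(root) == 2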
199
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _snake_case = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class a__ ( unittest.TestCase ): def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , ): """simple docstring""" _lowercase : str = [file for file in os.listdir(_UpperCamelCase ) if os.path.isfile(os.path.join(_UpperCamelCase , _UpperCamelCase ) )] if identifier is not None: _lowercase : str = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCamelCase , _UpperCamelCase ): for n_ in n_identifier: _lowercase : Dict = [file for file in files if n_ not in file] else: _lowercase : Optional[Any] = [file for file in files if n_identifier not in file] _lowercase : Dict = ignore_files or [] ignore_files.append("__init__.py" ) _lowercase : List[str] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , _UpperCamelCase ) if only_modules: _lowercase : Optional[Any] = file.split("." )[0] try: _lowercase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase ) _lowercase : Optional[int] = doctest.DocTestSuite(_UpperCamelCase ) _lowercase : Tuple = unittest.TextTestRunner().run(_UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: _lowercase : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = Path("src/transformers" ) _lowercase : str = "modeling" _lowercase : Tuple = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase , ignore_files=_UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Optional[int] = Path("src/transformers" ) _lowercase : Any = "tokenization" self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = Path("src/transformers" ) _lowercase : Optional[Any] = "configuration" self.analyze_directory(_UpperCamelCase , identifier=_UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Union[str, Any] = Path("src/transformers" ) _lowercase : List[Any] = ["configuration", "modeling", "tokenization"] self.analyze_directory(_UpperCamelCase , n_identifier=_UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Dict = Path("docs/source" ) _lowercase : int = ["favicon.ico"] self.analyze_directory(_UpperCamelCase , ignore_files=_UpperCamelCase , only_modules=_UpperCamelCase )
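For reference, a minimal sketch of the per-file doctest check the analyzer above performs: collect the examples embedded in docstrings, run them, and require zero failures. The add function is a made-up stand-in for a real module's contents.

import doctest

def add(a, b):
    """Return the sum of a and b.

    >>> add(2, 3)
    5
    """
    return a + b

# testmod() runs every doctest found in this module; failed == 0 mirrors
# the assertIs(result.failed, 0) check in the test class above.
result = doctest.testmod(verbose=False)
assert result.failed == 0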
199
1
from math import factorial, pi def SCREAMING_SNAKE_CASE_ ( __A : float , __A : int = 30 ) -> float: """simple docstring""" if not isinstance(__A , (int, float) ): raise ValueError('maclaurin_sin() requires either an int or float for theta' ) if not isinstance(__A , __A ) or accuracy <= 0: raise ValueError('maclaurin_sin() requires a positive int for accuracy' ) a_ : Tuple = float(__A ) a_ : int = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) ) def SCREAMING_SNAKE_CASE_ ( __A : float , __A : int = 30 ) -> float: """simple docstring""" if not isinstance(__A , (int, float) ): raise ValueError('maclaurin_cos() requires either an int or float for theta' ) if not isinstance(__A , __A ) or accuracy <= 0: raise ValueError('maclaurin_cos() requires a positive int for accuracy' ) a_ : Dict = float(__A ) a_ : Union[str, Any] = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
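A quick sanity check for the series above, restated with plain names (this maclaurin_sin is an illustrative re-implementation, not an import): after reducing the angle modulo 2*pi, thirty terms agree with math.sin to double precision.

import math

def maclaurin_sin(theta, accuracy=30):
    theta = float(theta)
    # Reduce the angle into [0, 2*pi) before summing, as the code above does.
    theta -= 2 * (theta // (2 * math.pi)) * math.pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / math.factorial(2 * r + 1)
        for r in range(accuracy)
    )

for x in (-10, -1, 0, 1, 10):
    assert abs(maclaurin_sin(x) - math.sin(x)) < 1e-9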
32
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
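A minimal usage sketch of the pipeline API these tests exercise; it assumes transformers is installed and, on first use, downloads the tiny test checkpoint named in the tests.

from transformers import pipeline

classifier = pipeline(
    task="text-classification",
    model="hf-internal-testing/tiny-random-distilbert",
)
# A plain call returns only the top label; top_k=None returns one
# {label, score} dict per class, the non-legacy "all scores" format.
print(classifier("This is great !"))
print(classifier("This is great !", top_k=None))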
32
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed lowerCAmelCase__ :Any = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(4_2) lowerCAmelCase__ :List[str] = '''sshleifer/student_marian_en_ro_6_1''' lowerCAmelCase__ :Dict = '''sshleifer/tiny-mbart''' @require_torch class __a ( A__ ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , ) _UpperCAmelCase = TrainerState.load_from_json(os.path.join(__snake_case , 'trainer_state.json' ) ).log_history if not do_eval: return _UpperCAmelCase = [log for log in logs if 'eval_loss' in log.keys()] _UpperCAmelCase = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _UpperCAmelCase = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] , __snake_case ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCAmelCase__ ( self ) -> int: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case ) @require_torch_multi_gpu def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase__ ( self ) -> str: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=__snake_case ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" self.run_seqaseq_quick( distributed=__snake_case , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=__snake_case ) @require_apex @require_torch_gpu def 
UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__snake_case , extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } _UpperCAmelCase = experiments[experiment_id] _UpperCAmelCase = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} _UpperCAmelCase = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**__snake_case , extra_args_str=data['extra_args_str'] ) _UpperCAmelCase = len(re.findall(__snake_case , cl.err ) ) self.assertEqual(__snake_case , data['n_matches'] ) @slow def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.run_trainer( eval_steps=2 , max_len=128 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , ) # Check metrics _UpperCAmelCase = TrainerState.load_from_json(os.path.join(__snake_case , 'trainer_state.json' ) ).log_history _UpperCAmelCase = [log for log in logs if 'eval_loss' in log.keys()] _UpperCAmelCase = eval_metrics[0] _UpperCAmelCase = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] , __snake_case ) # test if do_predict saves generations and metrics _UpperCAmelCase = os.listdir(__snake_case ) _UpperCAmelCase = {os.path.basename(__snake_case ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(_SCREAMING_SNAKE_CASE ) -> Tuple[int, float]: _UpperCAmelCase = '--skip_memory_metrics 0' _UpperCAmelCase = self.run_trainer( max_len=128 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , ) # Check metrics _UpperCAmelCase = TrainerState.load_from_json(Path(__snake_case , 'trainer_state.json' ) ).log_history _UpperCAmelCase = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) _UpperCAmelCase = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) _UpperCAmelCase = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 
train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _UpperCAmelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _UpperCAmelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig _UpperCAmelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _UpperCAmelCase = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _UpperCAmelCase = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __snake_case , __snake_case , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __snake_case , __snake_case , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3e-3 , _SCREAMING_SNAKE_CASE = "adafactor" , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , ) -> Dict: """simple docstring""" _UpperCAmelCase = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = f''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__snake_case )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__snake_case )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() _UpperCAmelCase = f''' --do_eval --per_device_eval_batch_size 4 
--max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__snake_case )} '''.split() _UpperCAmelCase = '\n --do_predict\n '.split() _UpperCAmelCase = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _UpperCAmelCase = get_gpu_count() _UpperCAmelCase = get_torch_dist_unique_port() _UpperCAmelCase = f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() _UpperCAmelCase = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__snake_case , env=self.get_env() ) else: _UpperCAmelCase = ['run_translation.py'] + args with patch.object(__snake_case , 'argv' , __snake_case ): main() return output_dir
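The trainer test above assembles argv by splitting f-string templates and concatenating flag groups; a stripped-down sketch of that pattern (flag names and defaults here are illustrative):

import shlex

def build_args(model_name, max_len, do_train=True, do_eval=True, extra=None):
    # Whitespace-split an f-string template into argv tokens.
    args = f"""
        --model_name_or_path {model_name}
        --max_source_length {max_len}
        --max_target_length {max_len}
    """.split()
    if do_train:
        args += ["--do_train"]
    if do_eval:
        args += ["--do_eval"]
    if extra is not None:
        args += shlex.split(extra)
    return args

print(build_args("sshleifer/tiny-mbart", 12, extra="--fp16 --sharded_ddp simple"))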
353
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file lowerCAmelCase__ :Dict = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def lowerCAmelCase__ ( a__: Optional[Any]=None ) -> List[Any]: '''simple docstring''' if subparsers is not None: _UpperCAmelCase = subparsers.add_parser('tpu-config' , description=_description ) else: _UpperCAmelCase = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description ) # Core arguments _UpperCAmelCase = parser.add_argument_group( 'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' ) config_args.add_argument( '--config_file' , type=a__ , default=a__ , help='Path to the config file to use for accelerate.' , ) config_args.add_argument( '--tpu_name' , default=a__ , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , ) config_args.add_argument( '--tpu_zone' , default=a__ , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , ) _UpperCAmelCase = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' ) pod_args.add_argument( '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , ) pod_args.add_argument( '--command_file' , default=a__ , help='The path to the file containing the commands to run on the pod on startup.' , ) pod_args.add_argument( '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , ) pod_args.add_argument( '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , ) pod_args.add_argument( '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , ) pod_args.add_argument( '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' ) if subparsers is not None: parser.set_defaults(func=a__ ) return parser def lowerCAmelCase__ ( a__: str ) -> Any: '''simple docstring''' _UpperCAmelCase = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(a__ ): _UpperCAmelCase = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: _UpperCAmelCase = defaults.command_file if not args.command and defaults.commands is not None: _UpperCAmelCase = defaults.commands if not args.tpu_name: _UpperCAmelCase = defaults.tpu_name if not args.tpu_zone: _UpperCAmelCase = defaults.tpu_zone if args.accelerate_version == "dev": _UpperCAmelCase = 'git+https://github.com/huggingface/accelerate.git' elif args.accelerate_version == "latest": _UpperCAmelCase = 'accelerate -U' elif isinstance(parse(args.accelerate_version ) , a__ ): _UpperCAmelCase = F'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError('You must specify either a command file or a command to run on the pod.' ) if args.command_file: with open(args.command_file , 'r' ) as f: _UpperCAmelCase = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , a__ ): _UpperCAmelCase = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate _UpperCAmelCase = ['cd /usr/share'] if args.install_accelerate: new_cmd += [F'''pip install {args.accelerate_version}'''] new_cmd += args.command _UpperCAmelCase = '; '.join(a__ ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess _UpperCAmelCase = ['gcloud'] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F'''Running {" ".join(a__ )}''' ) return subprocess.run(a__ ) print('Successfully setup pod.' ) def lowerCAmelCase__ ( ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = tpu_command_parser() _UpperCAmelCase = parser.parse_args() tpu_command_launcher(a__ )
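The launcher above ultimately joins the startup commands with '; ' and hands the result to a single gcloud ... ssh --command invocation; a sketch of that composition with a placeholder TPU name and zone:

commands = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
command_string = "; ".join(commands)

cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh",
    "my-tpu",                   # stand-in for args.tpu_name
    "--zone", "us-central1-b",  # stand-in for args.tpu_zone
    "--command", command_string,
    "--worker", "all",
]
print(" ".join(cmd))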
185
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', 
'''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = NllbTokenizer SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = legacy_behaviour super().__init__( vocab_file=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens}) snake_case_ = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.convert_tokens_to_ids(self._src_lang) snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> List[Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + 
self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
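A small sketch of the special-token layout the set-source-language logic above switches on legacy_behaviour: the legacy layout appends the language code after the end-of-sentence id, while the current layout prefixes it. The two ids below are illustrative stand-ins, not real vocabulary ids.

EOS, LANG = 2, 256047  # illustrative ids for </s> and a language code

def special_token_layout(legacy_behaviour):
    # Returns (prefix_tokens, suffix_tokens) wrapped around the sequence.
    if legacy_behaviour:
        return [], [EOS, LANG]
    return [LANG], [EOS]

assert special_token_layout(True) == ([], [2, 256047])
assert special_token_layout(False) == ([256047], [2])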
69
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> 
int: """simple docstring""" return 13
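Restated with plain names, the attention_types expansion the config above validates against num_layers: each [types, n] entry is repeated n times, so the default [[["global", "local"], 12]] yields 24 per-layer entries.

def expand_attention_types(attention_types):
    attentions = []
    for types, repeat in attention_types:
        for _ in range(repeat):
            attentions.extend(types)
    return attentions

layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24
assert layers[:4] == ["global", "local", "global", "local"]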
3
0
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if isinstance(UpperCamelCase , collections.abc.Iterable ): return x return (x, x) @require_flax class A : def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Optional[Any]: """simple docstring""" pass def __lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" pass def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" pass def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float ) -> List[Any]: """simple docstring""" _a = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , F'Difference between torch and flax is {diff} (>= {tol}).' 
) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int ) -> List[Any]: """simple docstring""" _a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) _a = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : List[str] ) -> Union[str, Any]: """simple docstring""" _a , _a = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) _a = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Optional[Any] ) -> Any: """simple docstring""" _a , _a = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) _a = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) _a = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) _a = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) _a = after_output[0] _a = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-3 ) def __lowerCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> int: """simple docstring""" _a , _a = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) _a = model( input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ ) _a = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _a = to_atuple(vision_model.config.image_size ) _a = to_atuple(vision_model.config.patch_size ) _a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _a = num_patches + 1 
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _a = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> int: """simple docstring""" pt_model.to(lowerCAmelCase_ ) pt_model.eval() # prepare inputs _a = inputs_dict _a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _a = pt_model(**lowerCAmelCase_ ).to_tuple() _a = fx_model(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) _a = fx_model_loaded(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase_ ) _a = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_flax=lowerCAmelCase_ ) pt_model_loaded.to(lowerCAmelCase_ ) pt_model_loaded.eval() with torch.no_grad(): _a = pt_model_loaded(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output_loaded.numpy() , 4e-2 ) def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> int: """simple docstring""" _a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) _a = VisionTextDualEncoderModel(lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) _a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase_ ) _a = fx_state self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> str: """simple docstring""" _a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) _a = VisionTextDualEncoderModel(lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) _a = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" _a = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" _a = self.prepare_config_and_inputs() 
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" _a = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" _a = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase_ ) @is_pt_flax_cross_test def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" _a = self.prepare_config_and_inputs() _a = config_inputs_dict.pop('''vision_config''' ) _a = config_inputs_dict.pop('''text_config''' ) _a = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) self.check_equivalence_flax_to_pt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @slow def __lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" _a , _a = self.get_pretrained_model_and_inputs() _a = model_a(**lowerCAmelCase_ ) _a = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase_ ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) _a = model_a(**lowerCAmelCase_ ) _a = after_outputs[0] _a = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-5 ) @require_flax class A ( _a ,unittest.TestCase ): def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , ) _a = 13 _a = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _a = random_attention_mask([batch_size, 4] ) _a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[str]: """simple docstring""" _a = FlaxViTModel(lowerCAmelCase_ ) _a = FlaxBertModel(lowerCAmelCase_ ) return vision_model, text_model def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" _a = FlaxViTModelTester(self ) _a = FlaxBertModelTester(self ) _a = vit_model_tester.prepare_config_and_inputs() _a = bert_model_tester.prepare_config_and_inputs() _a , _a = vision_config_and_inputs _a , _a , _a , _a = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class A ( _a ,unittest.TestCase ): def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , ) _a = 13 _a = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _a = random_attention_mask([batch_size, 4] ) _a = {'''pixel_values''': pixel_values, 
'''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Tuple: """simple docstring""" _a = FlaxCLIPVisionModel(lowerCAmelCase_ ) _a = FlaxBertModel(lowerCAmelCase_ ) return vision_model, text_model def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" _a = FlaxCLIPVisionModelTester(self ) _a = FlaxBertModelTester(self ) _a = clip_model_tester.prepare_config_and_inputs() _a = bert_model_tester.prepare_config_and_inputs() _a , _a = vision_config_and_inputs _a , _a , _a , _a = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" _a = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _a = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _a = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''np''' ) _a = model(**lowerCAmelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _a = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase_ , atol=1e-3 ) )
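# A minimal numpy sketch of the contrastive-logit relationship the integration test
# above verifies: for a CLIP-style dual encoder, logits_per_text is the transpose of
# logits_per_image, scaled by a learned logit scale (all values here are made up).
import numpy as np

image_embeds = np.random.randn(2, 8)
text_embeds = np.random.randn(3, 8)
image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)

logit_scale = 1.0  # the test above pins logit_scale_init_value=1.0
logits_per_image = logit_scale * image_embeds @ text_embeds.T  # (n_images, n_texts)
logits_per_text = logits_per_image.T  # (n_texts, n_images)
assert logits_per_text.shape == (3, 2)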
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class A : def __init__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Optional[Any]=10 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Union[str, Any]=0.9 , lowerCAmelCase_ : str=None , ) -> int: """simple docstring""" _a = parent _a = batch_size _a = image_size _a = num_channels _a = patch_size _a = tubelet_size _a = num_frames _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = mask_ratio _a = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame _a = (image_size // patch_size) ** 2 _a = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos _a = int(mask_ratio * self.seq_length ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _a = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" _a = VideoMAEModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) 
model.eval() _a = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: """simple docstring""" _a = VideoMAEForPreTraining(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _a = torch.ones((self.num_masks,) ) _a = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) _a = mask.expand(self.batch_size , -1 ).bool() _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # model only returns predictions for masked patches _a = mask.sum().item() _a = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" _a = self.prepare_config_and_inputs() _a , _a , _a = config_and_inputs _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( _a ,_a ,unittest.TestCase ): lowercase_ = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowercase_ = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = VideoMAEModelTester(self ) _a = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False ) -> Tuple: """simple docstring""" _a = copy.deepcopy(lowerCAmelCase_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _a = torch.ones((self.model_tester.num_masks,) ) _a = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) _a = mask.expand(self.model_tester.batch_size , -1 ).bool() _a = bool_masked_pos.to(lowerCAmelCase_ ) if return_labels: if model_class in [ *get_values(lowerCAmelCase_ ), ]: _a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def __lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" pass def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def __lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCAmelCase_ ) _a = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ ) @slow def __lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = VideoMAEModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" if not self.has_attentions: pass else: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = True for model_class in self.all_model_classes: _a = self.model_tester.seq_length - self.model_tester.num_masks _a = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) _a = True _a = False _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) _a = len(lowerCAmelCase_ ) # Check attention is always last and order is fine _a = True _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def __lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" def check_hidden_states_output(lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ): _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.hidden_states _a = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _a = self.model_tester.seq_length - self.model_tester.num_masks _a = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass def snake_case_ (): '''simple docstring''' _a = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) _a = np.load(UpperCamelCase ) return list(UpperCamelCase ) @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" _a = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( lowerCAmelCase_ ) _a = self.default_image_processor _a = prepare_video() _a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): _a = model(**lowerCAmelCase_ ) # verify the logits _a = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _a = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCAmelCase_ ) _a = self.default_image_processor _a = prepare_video() _a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) # add boolean mask, indicating which patches to mask _a = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) _a = torch.load(lowerCAmelCase_ ) # forward pass with torch.no_grad(): _a = model(**lowerCAmelCase_ ) # verify the logits _a = torch.Size([1, 14_08, 15_36] ) _a = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase_ ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) _a = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) _a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowerCAmelCase_ ).to( lowerCAmelCase_ ) with torch.no_grad(): _a = model(**lowerCAmelCase_ ) _a = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
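# A quick arithmetic sketch of the sequence-length bookkeeping the VideoMAE tester
# above relies on, using its defaults (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9):
image_size, patch_size, num_frames, tubelet_size, mask_ratio = 10, 2, 2, 2, 0.9

num_patches_per_frame = (image_size // patch_size) ** 2  # 25 spatial patches per frame
seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 25 tokens
num_masks = int(mask_ratio * seq_length)  # 22 masked tokens
assert (seq_length, num_masks) == (25, 22)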
from . import (albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom, bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec, deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer, longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former, maskformer, mbart, mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter, squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
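# A hedged sketch of enumerating these model sub-packages at runtime instead of
# relying on the hand-written list above (assumes transformers is installed):
import pkgutil

import transformers.models as models

names = sorted(module.name for module in pkgutil.iter_modules(models.__path__))
print(len(names), names[:5])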
from manim import * class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def _lowerCAmelCase ( self ): A : Union[str, Any] = Rectangle(height=0.5, width=0.5 ) A : Optional[int] = Rectangle(height=0.25, width=0.25 ) A : Optional[Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) A : List[str] = [mem.copy() for i in range(6 )] A : Any = [mem.copy() for i in range(6 )] A : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : str = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : List[Any] = Text("""CPU""", font_size=24 ) A : Optional[int] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) A : List[Any] = [mem.copy() for i in range(4 )] A : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Dict = Text("""GPU""", font_size=24 ) A : Any = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase__ ) A : Optional[int] = [mem.copy() for i in range(6 )] A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Optional[int] = Text("""Model""", font_size=24 ) A : List[Any] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase__ ) A : Tuple = [] A : Tuple = [] A : Any = [] for i, rect in enumerate(lowerCamelCase__ ): rect.set_stroke(lowerCamelCase__ ) A : Any = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__, opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=lowerCamelCase__, buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1], direction=lowerCamelCase__, buff=0.0 ) self.add(lowerCamelCase__ ) model_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ ) A : int = [mem.copy() for i in range(6 )] A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : str = Text("""Loaded Checkpoint""", font_size=24 ) A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCamelCase__ ) A : Optional[int] = [] A : List[Any] = [] for i, rect in enumerate(lowerCamelCase__ ): A : int = fill.copy().set_fill(lowerCamelCase__, opacity=0.7 ) target.move_to(lowerCamelCase__ ) ckpt_arr.append(lowerCamelCase__ ) A : List[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__, *lowerCamelCase__ ) A : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A : List[Any] = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase__, lowerCamelCase__ ) A : Union[str, Any] = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, ) blue_text.next_to(lowerCamelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() ) 
self.add(lowerCamelCase__ ) A : List[str] = MarkupText( f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''', font_size=24, ) step_a.move_to([2, 2, 0] ) A : List[str] = [meta_mem.copy() for i in range(6 )] A : List[Any] = [meta_mem.copy() for i in range(6 )] A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Dict = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 ) A : Optional[Any] = Text("""Disk""", font_size=24 ) A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowerCamelCase__, run_time=3 ), Write(lowerCamelCase__, run_time=1 ), Create(lowerCamelCase__, run_time=1 ) ) A : str = [] for i, rect in enumerate(lowerCamelCase__ ): A : Optional[Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCamelCase__, run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(FadeOut(lowerCamelCase__ ) ) A : List[str] = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''', font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__, run_time=3 ) ) self.play( FadeOut(lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ ), ) self.wait()
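# The animation above narrates weights being stored "in a variety of np.memmaps on
# disk". A minimal self-contained sketch of that idea (the file name is hypothetical):
import numpy as np

weights = np.memmap("layer_0.dat", dtype="float32", mode="w+", shape=(4, 4))
weights[:] = np.random.rand(4, 4)
weights.flush()  # persist the buffer to disk

reloaded = np.memmap("layer_0.dat", dtype="float32", mode="r", shape=(4, 4))
assert np.allclose(weights, reloaded)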
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser( description=( "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default="bert-base-uncased", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") __magic_name__ = parser.parse_args() if args.model_type == "bert": __magic_name__ = BertForMaskedLM.from_pretrained(args.model_name) __magic_name__ = "bert" else: raise ValueError("args.model_type should be \"bert\".") __magic_name__ = model.state_dict() __magic_name__ = {} for w in ["word_embeddings", "position_embeddings"]: __magic_name__ = state_dict[f'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: __magic_name__ = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}'''] __magic_name__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] __magic_name__ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 __magic_name__ = state_dict["cls.predictions.decoder.weight"] __magic_name__ = state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: __magic_name__ = state_dict[f'''cls.predictions.transform.dense.{w}'''] __magic_name__ = state_dict[f'''cls.predictions.transform.LayerNorm.{w}'''] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
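# The extraction script above copies teacher layers [0, 2, 4, 7, 9, 11] into student
# slots 0..5. A small sketch of that key-remapping idea (the key format is illustrative
# only, not the exact target naming of the script):
teacher_layers = [0, 2, 4, 7, 9, 11]
mapping = {teacher: student for student, teacher in enumerate(teacher_layers)}

old_key = "bert.encoder.layer.7.attention.self.query.weight"
new_key = old_key.replace(".layer.7.", f".layer.{mapping[7]}.")
assert new_key == "bert.encoder.layer.3.attention.self.query.weight"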
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str):
    # Initialise the PyTorch model from the JSON config
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
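# A minimal in-memory sketch of the save/load round trip the script performs, using a
# deliberately tiny config so it runs quickly (requires torch and transformers):
import torch
from transformers import AlbertConfig, AlbertForPreTraining

tiny_config = AlbertConfig(
    embedding_size=16, hidden_size=32, num_hidden_layers=1, num_attention_heads=2, intermediate_size=64
)
model = AlbertForPreTraining(tiny_config)
state_dict = model.state_dict()  # this is exactly what torch.save writes above

model_reloaded = AlbertForPreTraining(tiny_config)
model_reloaded.load_state_dict(state_dict)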
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
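# A quick round-trip check for the two helpers above:
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"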
"""Project Euler Problem 89: rewrite Roman numerals in their minimal form."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
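# A few spot checks for the helpers above (MCMXC is already minimal, so its line
# would contribute zero savings):
assert parse_roman_numerals("XIX") == 19
assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"
assert len("MCMXC") - len(generate_roman_numerals(parse_roman_numerals("MCMXC"))) == 0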
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def A_ ( snake_case_ : Path ,snake_case_ : list ): '''simple docstring''' UpperCamelCase : int = """\n""".join(snake_case_ ) Path(snake_case_ ).open("""w""" ).writelines(snake_case_ ) __A : Tuple = '''patrickvonplaten/t5-tiny-random''' __A : List[str] = '''sshleifer/bart-tiny-random''' __A : Union[str, Any] = '''sshleifer/tiny-mbart''' __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCamelCase ( _UpperCAmelCase ): def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Dict = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" UpperCamelCase : List[Any] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() UpperCamelCase : Optional[int] = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" ) UpperCamelCase : Optional[int] = """translation_en_to_de""" if model == T5_TINY else """summarization""" UpperCamelCase : str = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(SCREAMING_SNAKE_CASE_ , """argv""" , SCREAMING_SNAKE_CASE_ ): run_generate() assert Path(SCREAMING_SNAKE_CASE_ ).exists() # os.remove(Path(output_file_name)) def a_ ( self ): self.run_eval_tester(SCREAMING_SNAKE_CASE_ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def a_ ( self , SCREAMING_SNAKE_CASE_ ): self.run_eval_tester(SCREAMING_SNAKE_CASE_ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def a_ ( self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : int = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source""" UpperCamelCase : str = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() UpperCamelCase : Dict = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } UpperCamelCase : List[Any] = Path(self.get_auto_remove_tmp_dir() ) UpperCamelCase : Tuple = str(tmp_dir / """scores.json""" ) UpperCamelCase : Optional[int] = str(tmp_dir / """val.target""" ) _dump_articles(SCREAMING_SNAKE_CASE_ , text["""en"""] ) _dump_articles(SCREAMING_SNAKE_CASE_ , text["""de"""] ) UpperCamelCase : Dict = """translation_en_to_de""" if model == T5_TINY else """summarization""" UpperCamelCase : str = f'\n run_eval_search.py\n {model}\n {str(SCREAMING_SNAKE_CASE_ )}\n {str(SCREAMING_SNAKE_CASE_ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] ) with patch.object(SCREAMING_SNAKE_CASE_ , """argv""" , SCREAMING_SNAKE_CASE_ ): with CaptureStdout() as cs: run_search() 
UpperCamelCase : Tuple = [""" num_beams | length_penalty""", model, """Best score args"""] UpperCamelCase : int = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""" ) else: expected_strings.extend(SCREAMING_SNAKE_CASE_ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(SCREAMING_SNAKE_CASE_ ).exists() os.remove(Path(SCREAMING_SNAKE_CASE_ ) )
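# The tests above drive the evaluation scripts by patching sys.argv. A standalone
# sketch of that pattern (run_tool is a hypothetical stand-in for run_generate /
# run_search):
import sys
from unittest.mock import patch


def run_tool():
    return sys.argv[1:]


with patch.object(sys, "argv", ["run_eval.py", "--num_beams", "2"]):
    assert run_tool() == ["--num_beams", "2"]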
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __A : Any = logging.get_logger(__name__) __A : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __A : Optional[Any] = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } __A : Any = {'''allegro/herbert-base-cased''': 514} __A : Optional[Any] = {} class lowerCamelCase ( _UpperCAmelCase ): lowercase : Dict = VOCAB_FILES_NAMES lowercase : Any = PRETRAINED_VOCAB_FILES_MAP lowercase : List[str] = PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Union[str, Any] = HerbertTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_="</s>" , **SCREAMING_SNAKE_CASE_ , ): super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase : Dict = [self.cls_token_id] UpperCamelCase : str = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase : Tuple = [self.sep_token_id] UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase : Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ )
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : '''simple docstring''' def __init__( self : List[Any] ,A : Optional[int] ,A : str=13 ,A : Optional[int]=7 ,A : Any=True ,A : Tuple=True ,A : Any=True ,A : str=True ,A : Tuple=99 ,A : str=32 ,A : Dict=5 ,A : List[str]=4 ,A : Tuple=37 ,A : int="gelu" ,A : str=0.1 ,A : Tuple=0.1 ,A : int=5_12 ,A : List[Any]=16 ,A : Optional[Any]=2 ,A : Dict=0.02 ,A : str=3 ,A : Dict=4 ,A : Dict=None ,): __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope def UpperCamelCase_ ( self : Dict ): __A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __A = ids_tensor([self.batch_size] ,self.num_choices ) __A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : int ): return NystromformerConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,) def UpperCamelCase_ ( self : Dict ,A : Tuple ,A : Optional[Any] ,A : int ,A : Optional[int] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): __A = NystromformerModel(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ) __A = model(A ,token_type_ids=A ) __A = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Optional[int] ,A : Tuple ,A : Union[str, Any] ,A : Optional[int] ,A : Union[str, Any] ,A : int ,A : List[str] ,A : Optional[Any] ): __A = NystromformerForMaskedLM(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A 
,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : int ,A : List[Any] ,A : str ,A : Any ,A : Union[str, Any] ,A : Any ,A : List[Any] ,A : Dict ): __A = NystromformerForQuestionAnswering(config=A ) model.to(A ) model.eval() __A = model( A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Optional[int] ,A : Tuple ,A : List[Any] ,A : str ,A : List[Any] ,A : Union[str, Any] ,A : Dict ,A : str ): __A = self.num_labels __A = NystromformerForSequenceClassification(A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : str ,A : int ,A : Dict ,A : Tuple ,A : Optional[Any] ,A : List[Any] ,A : Tuple ,A : Optional[Any] ): __A = self.num_labels __A = NystromformerForTokenClassification(config=A ) model.to(A ) model.eval() __A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ,A : List[str] ,A : Union[str, Any] ,A : Optional[Any] ,A : Dict ,A : Tuple ,A : Any ): __A = self.num_choices __A = NystromformerForMultipleChoice(config=A ) model.to(A ) model.eval() __A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() __A = model( A ,attention_mask=A ,token_type_ids=A ,labels=A ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self : Dict ): __A = self.prepare_config_and_inputs() ( __A ) = config_and_inputs __A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' snake_case_ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False def UpperCamelCase_ ( self : Any ): __A = NystromformerModelTester(self ) __A = ConfigTester(self ,config_class=A ,hidden_size=37 ) def UpperCamelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : List[str] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCamelCase_ ( self : Tuple ): __A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __A = type self.model_tester.create_and_check_model(*A ) def 
UpperCamelCase_ ( self : List[str] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def UpperCamelCase_ ( self : Any ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def UpperCamelCase_ ( self : str ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def UpperCamelCase_ ( self : Dict ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def UpperCamelCase_ ( self : List[Any] ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = NystromformerModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self : str ): __A = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" ) __A = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __A = model(A )[0] __A = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape ,A ) __A = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,A ,atol=1E-4 ) ) @slow def UpperCamelCase_ ( self : Union[str, Any] ): __A = """the [MASK] of Belgium is Brussels""" __A = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" ) __A = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" ) __A = tokenizer(A ,return_tensors="pt" ) with torch.no_grad(): __A = model(encoding.input_ids ).logits __A = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(A ) ,"capital" )
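# The multiple-choice test above flattens choices into the batch dimension. The same
# shape juggling in isolation (requires torch):
import torch

batch_size, num_choices, seq_len = 2, 4, 6
input_ids = torch.arange(batch_size * seq_len).reshape(batch_size, seq_len)
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_len)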
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : int = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __lowercase (UpperCamelCase__ ): """simple docstring""" _snake_case = """segformer""" def __init__( self , A=3 , A=4 , A=[2, 2, 2, 2] , A=[8, 4, 2, 1] , A=[3_2, 6_4, 1_6_0, 2_5_6] , A=[7, 3, 3, 3] , A=[4, 2, 2, 2] , A=[1, 2, 5, 8] , A=[4, 4, 4, 4] , A="gelu" , A=0.0 , A=0.0 , A=0.1 , A=0.02 , A=0.1 , A=1e-6 , A=2_5_6 , A=2_5_5 , **A , ) -> Dict: super().__init__(**A ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , A , ) snake_case : List[str] = num_channels snake_case : Optional[int] = num_encoder_blocks snake_case : Optional[int] = depths snake_case : str = sr_ratios snake_case : str = hidden_sizes snake_case : Any = patch_sizes snake_case : Tuple = strides snake_case : List[str] = mlp_ratios snake_case : Optional[Any] = num_attention_heads snake_case : int = hidden_act snake_case : Tuple = hidden_dropout_prob snake_case : Any = attention_probs_dropout_prob snake_case : List[Any] = classifier_dropout_prob snake_case : Optional[Any] = initializer_range snake_case : Optional[Any] = drop_path_rate snake_case : int = layer_norm_eps snake_case : Optional[Any] = decoder_hidden_size snake_case : Tuple = kwargs.get("""reshape_last_stage""" , A ) snake_case : List[str] = semantic_loss_ignore_index class __lowercase (UpperCamelCase__ ): """simple docstring""" _snake_case = version.parse("""1.11""" ) @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase ( self ) -> float: return 1e-4 @property def UpperCAmelCase ( self ) -> int: return 1_2
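# A small sanity check on the config above: every per-stage list should have one entry
# per encoder block (requires transformers):
from transformers import SegformerConfig

config = SegformerConfig()
assert len(config.depths) == config.num_encoder_blocks
assert len(config.hidden_sizes) == config.num_encoder_blocks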
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" lowerCamelCase__: List[Any] =args.pruning_method lowerCamelCase__: int =args.threshold lowerCamelCase__: Any =args.model_name_or_path.rstrip("/" ) lowerCamelCase__: str =args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) lowerCamelCase__: int =torch.load(os.path.join(__a , "pytorch_model.bin" ) ) lowerCamelCase__: Optional[int] ={} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCamelCase__: Any =tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowerCamelCase__: Dict =tensor print(F"""Copied layer {name}""" ) elif "bias" in name: lowerCamelCase__: List[Any] =tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowerCamelCase__: Any =MagnitudeBinarizer.apply(inputs=__a , threshold=__a ) lowerCamelCase__: Optional[int] =tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCamelCase__: Union[str, Any] =name[:-6] lowerCamelCase__: Any =model[F"""{prefix_}mask_scores"""] lowerCamelCase__: List[Any] =TopKBinarizer.apply(__a , __a ) lowerCamelCase__: Optional[int] =tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCamelCase__: Tuple =name[:-6] lowerCamelCase__: List[Any] =model[F"""{prefix_}mask_scores"""] lowerCamelCase__: Any =ThresholdBinarizer.apply(__a , __a , __a ) lowerCamelCase__: Tuple =tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCamelCase__: str =name[:-6] lowerCamelCase__: List[str] =model[F"""{prefix_}mask_scores"""] lowerCamelCase__ , lowerCamelCase__: List[str] =-0.1, 1.1 lowerCamelCase__: Optional[Any] =torch.sigmoid(__a ) lowerCamelCase__: Any =s * (r - l) + l lowerCamelCase__: List[Any] =s_bar.clamp(min=0.0 , max=1.0 ) lowerCamelCase__: Optional[int] =tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: lowerCamelCase__: Optional[int] =os.path.join( os.path.dirname(__a ) , F"""bertarized_{os.path.basename(__a )}""" ) if not os.path.isdir(__a ): shutil.copytree(__a , __a ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(__a , os.path.join(__a , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." 
"Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A = parser.parse_args() main(args)
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
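# debug_launcher in isolation: it runs a function under a single-process CPU launcher,
# which is what the CPU tests above rely on (requires accelerate):
from accelerate import debug_launcher


def tiny_job():
    print("running under debug_launcher")


debug_launcher(tiny_job, num_processes=1)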
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
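# The deprecation-shim pattern above, reduced to its essentials so the warning
# behaviour can be checked without transformers (class names here are made up):
import warnings


class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass


class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldProcessor()
assert caught and issubclass(caught[0].category, FutureWarning)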
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
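# Usage sketch for the context manager above: the cursor stays hidden for the duration
# of the block and is restored even if the body raises.
import time

with hide():
    time.sleep(0.1)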
from ..utils import DummyObject, requires_backends


# Dummy Flax objects: placeholders that raise a helpful error when the "flax"
# backend is not installed. The original distinct class and method names were
# lost in extraction; the placeholder names and the from_config/from_pretrained
# pattern below follow the standard diffusers dummy-object layout and are
# assumptions. `_backends` is the attribute the `DummyObject` metaclass expects.
class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
367
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone  # name of the timm backbone, e.g. "resnet50"
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # the target of this assignment was lost in extraction; `use_timm_backbone`
        # matches the upstream transformers config and is assumed here
        self.use_timm_backbone = True
        # default to the last feature map if no explicit indices are given
        self.out_indices = out_indices if out_indices is not None else (-1,)
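# A minimal usage sketch for the config above (assuming it is registered as
# transformers' TimmBackboneConfig; the backbone name "resnet18" and the
# indices are illustrative only):
config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3))
assert config.model_type == "timm_backbone"
assert config.out_indices == (1, 2, 3)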
319
0
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _lowercase ( __snake_case = "laptop" ) -> DataFrame: __lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}""" __lowerCAmelCase : Union[str, Any] = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } __lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text ) # Initialize a Pandas dataframe with the column titles __lowerCAmelCase : Dict = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,): try: __lowerCAmelCase : Any = item.ha.text __lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"] __lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text try: __lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text except AttributeError: __lowerCAmelCase : Optional[Any] = "Not available" try: __lowerCAmelCase : Union[str, Any] = ( "₹" + item.find( "span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: __lowerCAmelCase : Dict = "" try: __lowerCAmelCase : str = float( ( ( float(product_mrp.strip("₹" ).replace("," ,"" ) ) - float(product_price.strip("₹" ).replace("," ,"" ) ) ) / float(product_mrp.strip("₹" ).replace("," ,"" ) ) ) * 100 ) except ValueError: __lowerCAmelCase : List[str] = float("nan" ) except AttributeError: pass __lowerCAmelCase : int = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] __lowerCAmelCase : Union[str, Any] = " " __lowerCAmelCase : Union[str, Any] = " " data_frame.index += 1 return data_frame if __name__ == "__main__": __snake_case : Any = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
269
"""simple docstring""" from __future__ import annotations def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list: __lowerCAmelCase : Dict = [] __lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __lowerCAmelCase : int = result + left + right return input_list def _lowercase ( __snake_case ) -> list: if len(__snake_case ) <= 1: return input_list __lowerCAmelCase : int = list(__snake_case ) # iteration for two-way merging __lowerCAmelCase : Optional[int] = 2 while p <= len(__snake_case ): # getting low, high and middle value for merge-sort of single list for i in range(0 ,len(__snake_case ) ,__snake_case ): __lowerCAmelCase : Union[str, Any] = i __lowerCAmelCase : Tuple = i + p - 1 __lowerCAmelCase : Optional[Any] = (low + high + 1) // 2 __lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case ) # final merge of last two parts if p * 2 >= len(__snake_case ): __lowerCAmelCase : Optional[Any] = i __lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __snake_case : Optional[int] = [] else: __snake_case : int = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
269
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


# NOTE: variable/method names below were lost in extraction and are restored
# from their use sites; the class names are placeholders in the upstream style.
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
98
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
98
1
import random


def _partition(data: list, pivot) -> tuple[list, list, list]:
    """Split data into elements less than, equal to, and greater than pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
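# A quick usage sketch for quick_select (list values are illustrative):
# selecting index len(data) // 2 gives the median of the seven values.
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(data, len(data) // 2))  # -> 7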
236
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt' UpperCAmelCase__ = '"text": ["foo", "foo"]' UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class lowerCAmelCase__ : __a = 200 __a = {"""Content-Length""": """100"""} __a = {} def lowercase ( self : List[str] , **_lowerCamelCase : List[str] ): return [bytes(_lowerCamelCase , '''utf-8''' )] def _UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> Dict: return MockResponse() @pytest.mark.parametrize('''urls_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: import requests monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase ) _snake_case = URL if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = url elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [url] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': url} _snake_case = '''dummy''' _snake_case = '''downloads''' _snake_case = tmp_path _snake_case = DownloadConfig( cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.download(__lowerCamelCase ) _snake_case = urls for downloaded_paths in [downloaded_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [downloaded_paths] _snake_case = [urls] elif isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in downloaded_paths.keys() _snake_case = downloaded_paths.values() _snake_case = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _snake_case = Path(__lowerCamelCase ) _snake_case = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _snake_case = downloaded_path.read_text() assert content == CONTENT _snake_case = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() _snake_case = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int: _snake_case = str(__lowerCamelCase ) if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = filename elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [filename] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': filename} _snake_case = '''dummy''' _snake_case = xz_file.parent _snake_case = '''extracted''' _snake_case = DownloadConfig( cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.extract(__lowerCamelCase ) _snake_case = paths for extracted_paths in [extracted_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [extracted_paths] _snake_case = [paths] elif 
isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in extracted_paths.keys() _snake_case = extracted_paths.values() _snake_case = paths.values() assert extracted_paths for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] _snake_case = Path(__lowerCamelCase ) _snake_case = extracted_path.parts assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _snake_case = extracted_path.read_text() _snake_case = text_file.read_text() assert extracted_file_content == expected_file_content def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict: assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(__lowerCamelCase , start=1 ): _snake_case = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_tar == 1 assert num_jsonl == 2 def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ): assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
288
0
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
115
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
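# A small worked example for prime_factors (input chosen for illustration):
# 360 = 2**3 * 3**2 * 5, so the factors come back in ascending order.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]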
115
1
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (as substrings of length ngram_size) of the sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
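# Usage sketch for create_ngram (input is illustrative):
# the 4-character string "I am" yields three character bigrams.
assert create_ngram("I am", 2) == ["I ", " a", "am"]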
93
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ UpperCAmelCase_ : Tuple = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 
5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. """ UpperCAmelCase_ : Union[str, Any] = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. 
Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = {doc: key_lines} SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines} SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 
SCREAMING_SNAKE_CASE_ : str = 0 for name, metric in metrics: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , ) if conll_subparts_num == 3: SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: SCREAMING_SNAKE_CASE_ : Any = line.split()[5] if not parse_col == "-": SCREAMING_SNAKE_CASE_ : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''')), '''references''': datasets.Sequence(datasets.Value('''string''')), }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate( key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , ) return score
91
0
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


# NOTE: variable names below were lost in extraction and are restored from
# their use sites; a couple of test-method names are plausible placeholders.
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
236
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: snake_case_ : List[Any] = None snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} snake_case_ : List[str] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } snake_case_ : str = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off snake_case_ : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowercase__ ( lowercase ): lowercase__ = VOCAB_FILES_NAMES 
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = NllbTokenizer lowercase__ = [] lowercase__ = [] def __init__( self : List[Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : List[Any]="<unk>" ,lowerCamelCase__ : Any="<pad>" ,lowerCamelCase__ : Optional[Any]="<mask>" ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token _UpperCamelCase : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,legacy_behaviour=lowerCamelCase__ ,**lowerCamelCase__ ,) _UpperCamelCase : int = vocab_file _UpperCamelCase : int = False if not self.vocab_file else True _UpperCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _UpperCamelCase : List[str] = { lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _UpperCamelCase : List[str] = src_lang if src_lang is not None else 'eng_Latn' _UpperCamelCase : int = self.convert_tokens_to_ids(self._src_lang ) _UpperCamelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ): '''simple docstring''' _UpperCamelCase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' _UpperCamelCase : Dict = [self.sep_token_id] _UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] 
,**lowerCamelCase__ : Dict ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _UpperCamelCase : Tuple = src_lang _UpperCamelCase : Optional[Any] = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ ) _UpperCamelCase : str = tgt_lang_id return inputs def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "eng_Latn" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "fra_Latn" ,**lowerCamelCase__ : Union[str, Any] ,): '''simple docstring''' _UpperCamelCase : Tuple = src_lang _UpperCamelCase : List[str] = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[Any] ): '''simple docstring''' _UpperCamelCase : int = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : List[Any] = [self.cur_lang_code] _UpperCamelCase : List[Any] = [self.eos_token_id] _UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ): '''simple docstring''' _UpperCamelCase : Any = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: _UpperCamelCase : Tuple = [] _UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : Tuple = [self.cur_lang_code] _UpperCamelCase : Optional[Any] = [self.eos_token_id] _UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCamelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _UpperCamelCase : List[Any] = os.path.join( lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file ,lowerCamelCase__ ) return (out_vocab_file,)
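A short usage sketch for the fast NLLB tokenizer above: src_lang selects the special tokens set by set_src_lang_special_tokens, and forcing the target language code as the first generated token steers the translation. The checkpoint is the one named in the file; the rest follows the standard generate API:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The tokenizer prefixes the source language code.", return_tensors="pt")
generated = model.generate(
    **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))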
236
1
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the range [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean using the sample standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
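A quick worked run of the two helpers, with hand-checked values: min-max scaling maps the extremes to 0 and 1, while z-scoring centers on the mean using statistics.stdev (the sample standard deviation):

data = [2.0, 4.0, 6.0, 8.0]
print(normalization(data))     # [0.0, 0.333, 0.667, 1.0]
print(standardization(data))   # [-1.162, -0.387, 0.387, 1.162], symmetric around 0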
83
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: '''simple docstring''' while a != 0: __snake_case , __snake_case : Optional[Any] = b % a, a return b def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: '''simple docstring''' if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) != 1: __snake_case : Optional[Any] = F"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(UpperCAmelCase_ ) __snake_case , __snake_case , __snake_case : Optional[int] = 1, 0, a __snake_case , __snake_case , __snake_case : int = 0, 1, m while va != 0: __snake_case : Union[str, Any] = ua // va __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
172
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase__ = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
352
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCamelCase__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''GPTNeoXTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXForCausalLM''', '''GPTNeoXForQuestionAnswering''', '''GPTNeoXForSequenceClassification''', '''GPTNeoXForTokenClassification''', '''GPTNeoXLayer''', '''GPTNeoXModel''', '''GPTNeoXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
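The point of the structure above is that importing the package stays cheap: _LazyModule resolves a symbol (and therefore pulls in torch) only on first attribute access. A guarded import in user code mirrors the same idea; this assumes transformers itself is installed:

import importlib.util

if importlib.util.find_spec("torch") is not None:
    # resolved lazily by _LazyModule; torch is imported here, not at package import
    from transformers import GPTNeoXForCausalLM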
63
0
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': 
['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
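Each iteration replaces every segment with four shorter ones, so the point count is fully predictable; a quick check against the helpers above:

for n in range(4):
    # 3 initial segments -> 3 * 4**n segments -> 3 * 4**n + 1 points
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1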
303
0
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCamelCase ( lowerCAmelCase : Dict[str, torch.Tensor] ): """simple docstring""" __magic_name__ : Optional[Any] = [] __magic_name__ : str = [] __magic_name__ : Optional[Any] = [] for rt in rc.restypes: __magic_name__ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) __magic_name__ : Optional[int] = {name: i for i, name in enumerate(lowerCAmelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) __magic_name__ : Dict = torch.tensor( lowerCAmelCase , dtype=torch.intaa , device=protein['aatype'].device , ) __magic_name__ : Union[str, Any] = torch.tensor( lowerCAmelCase , dtype=torch.intaa , device=protein['aatype'].device , ) __magic_name__ : Dict = torch.tensor( lowerCAmelCase , dtype=torch.floataa , device=protein['aatype'].device , ) __magic_name__ : Optional[int] = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein __magic_name__ : Any = restype_atomaa_to_atomaa[protein_aatype] __magic_name__ : int = restype_atomaa_mask[protein_aatype] __magic_name__ : Optional[Any] = residx_atomaa_mask __magic_name__ : Any = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back __magic_name__ : Any = restype_atomaa_to_atomaa[protein_aatype] __magic_name__ : Optional[int] = residx_atomaa_to_atomaa.long() # create the corresponding mask __magic_name__ : str = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): __magic_name__ : Any = rc.restype_atoa[restype_letter] __magic_name__ : Optional[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: __magic_name__ : List[str] = rc.atom_order[atom_name] __magic_name__ : List[str] = 1 __magic_name__ : List[str] = restype_atomaa_mask[protein_aatype] __magic_name__ : List[str] = residx_atomaa_mask return protein def lowerCamelCase ( lowerCAmelCase : Dict[str, torch.Tensor] ): """simple docstring""" __magic_name__ : Tuple = tree_map(lambda lowerCAmelCase : torch.tensor(lowerCAmelCase , device=batch['aatype'].device ) , lowerCAmelCase , np.ndarray ) __magic_name__ : Optional[int] = tensor_tree_map(lambda lowerCAmelCase : np.array(lowerCAmelCase ) , make_atomaa_masks(lowerCAmelCase ) ) return out
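Once the mapping tensors above are attached to the protein dict, converting between atom layouts is a single gather. The shapes below are real (14 compact per-residue slots vs. the fixed 37-atom vocabulary), but the index and mask values are illustrative placeholders:

import torch

n_res = 2
atom37_pos = torch.randn(n_res, 37, 3)                              # per-atom coordinates
residx_atom14_to_atom37 = torch.zeros(n_res, 14, dtype=torch.long)  # placeholder mapping
atom14_atom_exists = torch.ones(n_res, 14)                          # placeholder mask

idx = residx_atom14_to_atom37[..., None].expand(-1, -1, 3)
atom14_pos = torch.gather(atom37_pos, 1, idx) * atom14_atom_exists[..., None]
assert atom14_pos.shape == (n_res, 14, 3)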
352
'''simple docstring''' from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP lowerCAmelCase :str = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase :int = ''' Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") >>> pipe.to("cuda") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save("cat.png") ``` ''' def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any]=8 ): """simple docstring""" __magic_name__ : List[str] = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __magic_name__ : str = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class _lowerCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , _A : MultilingualCLIP , _A : XLMRobertaTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, DDPMScheduler] , _A : VQModel , ) -> int: super().__init__() self.register_modules( text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , movq=_A , ) __magic_name__ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : Optional[Any] , _A : Optional[int] , _A : Dict , _A : str , _A : List[str] ) -> str: if latents is None: __magic_name__ : Any = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) __magic_name__ : int = latents.to(_A ) __magic_name__ : Union[str, Any] = latents * scheduler.init_noise_sigma return latents def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : List[str] , _A : List[str] , _A : List[Any] , _A : str=None , ) -> Dict: __magic_name__ : Optional[Any] = len(_A ) if isinstance(_A , _A ) else 1 # get prompt text embeddings __magic_name__ : str = self.tokenizer( _A , padding='max_length' , truncation=_A , max_length=77 , return_attention_mask=_A , add_special_tokens=_A , return_tensors='pt' , ) __magic_name__ : Optional[Any] = text_inputs.input_ids __magic_name__ : Optional[Any] = self.tokenizer(_A , padding='longest' , return_tensors='pt' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_A , _A ): __magic_name__ : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) 
__magic_name__ : Union[str, Any] = text_input_ids.to(_A ) __magic_name__ : Dict = text_inputs.attention_mask.to(_A ) __magic_name__ , __magic_name__ : str = self.text_encoder( input_ids=_A , attention_mask=_A ) __magic_name__ : Tuple = prompt_embeds.repeat_interleave(_A , dim=0 ) __magic_name__ : int = text_encoder_hidden_states.repeat_interleave(_A , dim=0 ) __magic_name__ : Union[str, Any] = text_mask.repeat_interleave(_A , dim=0 ) if do_classifier_free_guidance: __magic_name__ : List[str] if negative_prompt is None: __magic_name__ : Optional[Any] = [''] * batch_size elif type(_A ) is not type(_A ): raise TypeError( F'`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !=' F' {type(_A )}.' ) elif isinstance(_A , _A ): __magic_name__ : int = [negative_prompt] elif batch_size != len(_A ): raise ValueError( F'`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:' F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches' ' the batch size of `prompt`.' ) else: __magic_name__ : Dict = negative_prompt __magic_name__ : List[str] = self.tokenizer( _A , padding='max_length' , max_length=77 , truncation=_A , return_attention_mask=_A , add_special_tokens=_A , return_tensors='pt' , ) __magic_name__ : Optional[int] = uncond_input.input_ids.to(_A ) __magic_name__ : Optional[Any] = uncond_input.attention_mask.to(_A ) __magic_name__ , __magic_name__ : int = self.text_encoder( input_ids=_A , attention_mask=_A ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __magic_name__ : List[str] = negative_prompt_embeds.shape[1] __magic_name__ : str = negative_prompt_embeds.repeat(1 , _A ) __magic_name__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A ) __magic_name__ : Any = uncond_text_encoder_hidden_states.shape[1] __magic_name__ : Optional[int] = uncond_text_encoder_hidden_states.repeat(1 , _A , 1 ) __magic_name__ : Tuple = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , _A , -1 ) __magic_name__ : List[Any] = uncond_text_mask.repeat_interleave(_A , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __magic_name__ : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) __magic_name__ : str = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __magic_name__ : str = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowerCAmelCase ( self : Dict , _A : List[Any]=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) __magic_name__ : List[Any] = torch.device(F'cuda:{gpu_id}' ) __magic_name__ : Dict = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_A , _A ) def __lowerCAmelCase ( self : List[Any] , _A : List[str]=0 ) -> str: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' 
) __magic_name__ : int = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __magic_name__ : Optional[int] = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __magic_name__ , __magic_name__ : Union[str, Any] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A ) if self.safety_checker is not None: __magic_name__ , __magic_name__ : List[str] = cpu_offload_with_hook(self.safety_checker , _A , prev_module_hook=_A ) # We'll offload the last model manually. __magic_name__ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCAmelCase ( self : int ) -> List[str]: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_A , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_A ) def __call__( self : int , _A : Union[str, List[str]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Optional[Union[str, List[str]]] = None , _A : int = 512 , _A : int = 512 , _A : int = 100 , _A : float = 4.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , ) -> Optional[int]: if isinstance(_A , _A ): __magic_name__ : Optional[int] = 1 elif isinstance(_A , _A ): __magic_name__ : Union[str, Any] = len(_A ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_A )}' ) __magic_name__ : Tuple = self._execution_device __magic_name__ : Any = batch_size * num_images_per_prompt __magic_name__ : int = guidance_scale > 1.0 __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = self._encode_prompt( _A , _A , _A , _A , _A ) if isinstance(_A , _A ): __magic_name__ : Union[str, Any] = torch.cat(_A , dim=0 ) if isinstance(_A , _A ): __magic_name__ : Dict = torch.cat(_A , dim=0 ) if do_classifier_free_guidance: __magic_name__ : Dict = image_embeds.repeat_interleave(_A , dim=0 ) __magic_name__ : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 ) __magic_name__ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=_A ) self.scheduler.set_timesteps(_A , device=_A ) __magic_name__ : Tuple = self.scheduler.timesteps __magic_name__ : Optional[int] = self.unet.config.in_channels __magic_name__ , __magic_name__ : Dict = get_new_h_w(_A , _A , self.movq_scale_factor ) # create initial latent __magic_name__ : Union[str, Any] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _A , _A , _A , self.scheduler , ) for i, t in enumerate(self.progress_bar(_A ) ): # expand the latents if we are doing classifier free guidance __magic_name__ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __magic_name__ : Tuple = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} __magic_name__ : Union[str, Any] = self.unet( sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0] if do_classifier_free_guidance: 
__magic_name__ , __magic_name__ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) __magic_name__ , __magic_name__ : Dict = noise_pred.chunk(2 ) __magic_name__ , __magic_name__ : List[str] = variance_pred.chunk(2 ) __magic_name__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __magic_name__ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __magic_name__ , __magic_name__ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __magic_name__ : List[Any] = self.scheduler.step( _A , _A , _A , generator=_A , ).prev_sample # post-processing __magic_name__ : int = self.movq.decode(_A , force_not_quantize=_A )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: __magic_name__ : Dict = image * 0.5 + 0.5 __magic_name__ : str = image.clamp(0 , 1 ) __magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __magic_name__ : str = self.numpy_to_pil(_A ) if not return_dict: return (image,) return ImagePipelineOutput(images=_A )
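The guidance step buried in the denoising loop above is worth isolating: classifier-free guidance extrapolates from the unconditional prediction toward the text-conditioned one, scaled by guidance_scale. A toy check with placeholder tensors:

import torch

noise_uncond = torch.zeros(4)
noise_text = torch.ones(4)
guidance_scale = 4.0
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
assert torch.equal(guided, guidance_scale * noise_text)   # holds here since uncond is zero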
275
0
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a__ : List[str] ='''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' a__ : Optional[Any] ='''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' a__ : Optional[int] =''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' a__ : Dict =''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' a__ : Optional[Any] ='''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def _lowerCamelCase ( self : Union[str, Any] ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def _lowerCamelCase ( self : Union[str, Any] , __A : Dict , __A : Tuple , __A : List[str]=[1, 1_0, 1_0_0] , __A : Dict=4 , __A : List[str]=3.0 ): if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=__A ) as executor: __UpperCamelCase = [] __UpperCamelCase = Counter() __UpperCamelCase = 0 __UpperCamelCase = defaultdict(__A ) for task_id, (candidates, test_case) in enumerate(zip(__A , __A ) ): for candidate in candidates: __UpperCamelCase = candidate + '\n' + test_case __UpperCamelCase = (test_program, timeout, task_id, completion_id[task_id]) __UpperCamelCase = executor.submit(__A , *__A ) futures.append(__A ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__A ): __UpperCamelCase = future.result() results[result["task_id"]].append((result['completion_id'], result) ) __UpperCamelCase , __UpperCamelCase = [], [] for result in results.values(): result.sort() __UpperCamelCase = [r[1]['passed'] for r in result] total.append(len(__A ) ) correct.append(sum(__A ) ) __UpperCamelCase = np.array(__A ) __UpperCamelCase = np.array(__A ) __UpperCamelCase = k __UpperCamelCase = {f'''pass@{k}''': estimate_pass_at_k(__A , __A , __A ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowercase__ ( __lowercase : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] ) -> Optional[int]: """simple docstring""" def estimator(__lowercase : int , __lowercase : int , __lowercase : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__lowercase , __lowercase ): __UpperCamelCase = itertools.repeat(__lowercase , len(__lowercase ) ) else: assert len(__lowercase ) == len(__lowercase ) __UpperCamelCase = iter(__lowercase ) return np.array([estimator(int(__lowercase ) , int(__lowercase ) , __lowercase ) for n, c in zip(__lowercase , __lowercase )] )
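The estimator at the bottom of the file is the unbiased pass@k from the Codex paper: the probability that at least one of k samples, drawn without replacement from n candidates of which c pass, is correct. A standalone, hand-checked copy:

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0                     # too few failures to fill k slots: certain success
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

assert pass_at_k(2, 1, 1) == 0.5       # matches the docstring example above
assert pass_at_k(2, 1, 2) == 1.0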
53
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _UpperCAmelCase ( A__ ): """simple docstring""" def __init__( self : Dict, *lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Any=None, **lowerCamelCase : str ): '''simple docstring''' super().__init__(*lowerCamelCase, **lowerCamelCase ) lowercase__ = eval_examples lowercase__ = post_process_function def lowercase__ ( self : int, lowerCamelCase : str=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : str = "eval" ): '''simple docstring''' lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase__ = self.get_eval_dataloader(lowerCamelCase ) lowercase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowercase__ = time.time() try: lowercase__ = eval_loop( lowerCamelCase, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase__ = self.post_process_function(lowerCamelCase, lowerCamelCase, output.predictions ) lowercase__ = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): lowercase__ = metrics.pop(lowerCamelCase ) metrics.update(output.metrics ) else: lowercase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase__ = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase ) return metrics def lowercase__ ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : int=None, lowerCamelCase : str = "test" ): '''simple docstring''' lowercase__ = self.get_test_dataloader(lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowercase__ = time.time() try: lowercase__ = eval_loop( lowerCamelCase, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase__ = self.post_process_function(lowerCamelCase, lowerCamelCase, output.predictions, '''predict''' ) lowercase__ = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): lowercase__ = metrics.pop(lowerCamelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase )
207
0
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
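Kahn's algorithm doubles as a longest-path solver on a DAG because a vertex is only settled after all of its predecessors. A hand-checkable call against the function above:

# longest chain is 0 -> 1 -> 3 (or 0 -> 2 -> 3): three vertices, so this prints 3
longest_distance({0: [1, 2], 1: [3], 2: [3], 3: []})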
366
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
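A standalone contract check for twin_prime; is_prime is re-implemented by trial division here so the snippet runs outside the repository:

def _is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert _is_prime(3) and _is_prime(5)   # so twin_prime(3) == 5
assert not _is_prime(9)                # so twin_prime(7) == -1, since 9 = 3 * 3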
325
0
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase): _a = DanceDiffusionPipeline _a = UNCONDITIONAL_AUDIO_GENERATION_PARAMS _a = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } _a = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS _a = False _a = False def SCREAMING_SNAKE_CASE ( self: Optional[int] ): torch.manual_seed(0 ) lowercase :str = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_lowerCAmelCase , use_timestep_embedding=_lowerCAmelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) lowercase :List[Any] = IPNDMScheduler() lowercase :Optional[int] = { "unet": unet, "scheduler": scheduler, } return components def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str=0 ): if str(_lowerCAmelCase ).startswith("mps" ): lowercase :Optional[Any] = torch.manual_seed(_lowerCAmelCase ) else: lowercase :Union[str, Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowercase :Any = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def SCREAMING_SNAKE_CASE ( self: str ): lowercase :List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase :List[Any] = self.get_dummy_components() lowercase :int = DanceDiffusionPipeline(**_lowerCAmelCase ) lowercase :Tuple = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase :Dict = self.get_dummy_inputs(_lowerCAmelCase ) lowercase :Dict = pipe(**_lowerCAmelCase ) lowercase :Optional[int] = output.audios lowercase :Tuple = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowercase :Optional[int] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def SCREAMING_SNAKE_CASE ( self: Any ): return super().test_save_load_local() @skip_mps def SCREAMING_SNAKE_CASE ( self: Tuple ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def SCREAMING_SNAKE_CASE ( self: List[str] ): return super().test_save_load_optional_components() @skip_mps def SCREAMING_SNAKE_CASE ( self: Any ): return super().test_attention_slicing_forward_pass() def SCREAMING_SNAKE_CASE ( self: str ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :List[Any] = torch_device lowercase :List[Any] = 
DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) lowercase :Tuple = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase :List[Any] = torch.manual_seed(0 ) lowercase :str = pipe(generator=_lowerCAmelCase , num_inference_steps=1_00 , audio_length_in_s=4.0_96 ) lowercase :Union[str, Any] = output.audios lowercase :Optional[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowercase :List[str] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self: int ): lowercase :Optional[Any] = torch_device lowercase :Any = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) lowercase :Dict = pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase :int = torch.manual_seed(0 ) lowercase :Dict = pipe(generator=_lowerCAmelCase , num_inference_steps=1_00 , audio_length_in_s=4.0_96 ) lowercase :Dict = output.audios lowercase :Optional[int] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowercase :Union[str, Any] = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
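A minimal generation sketch mirroring the slow test above; it assumes a CUDA device and network access to the harmonai checkpoint:

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
audio = output.audios[0]   # shape (2, sample_count): a stereo waveform ready for export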
236
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base/exponent pair with the greatest value."""
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
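The logarithm trick in solution() matters because a**b is astronomically large at this scale; b * log10(a) preserves the ordering since log10 is monotonic. Project Euler 99's own worked pair confirms it:

from math import log10

# 632382**518061 > 519432**525806, per the problem statement
assert 518061 * log10(632382) > 525806 * log10(519432)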
236
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ : '''simple docstring''' def __init__( self: int , a: str , a: List[Any]=13 , a: Optional[int]=32 , a: Any=3 , a: int=4 , a: Optional[int]=[10, 20, 30, 40] , a: List[Any]=[2, 2, 3, 2] , a: List[Any]=True , a: Optional[Any]=True , a: int=37 , a: Union[str, Any]="gelu" , a: Tuple=10 , a: int=0.0_2 , a: Any=["stage2", "stage3", "stage4"] , a: Optional[Any]=3 , a: Tuple=None , ): __lowerCamelCase : str = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : List[str] = image_size __lowerCamelCase : Dict = num_channels __lowerCamelCase : Optional[Any] = num_stages __lowerCamelCase : Optional[Any] = hidden_sizes __lowerCamelCase : List[Any] = depths __lowerCamelCase : List[Any] = is_training __lowerCamelCase : int = use_labels __lowerCamelCase : Optional[Any] = intermediate_size __lowerCamelCase : str = hidden_act __lowerCamelCase : List[Any] = type_sequence_label_size __lowerCamelCase : Dict = initializer_range __lowerCamelCase : Optional[Any] = out_features __lowerCamelCase : Any = num_labels __lowerCamelCase : Union[str, Any] = scope __lowerCamelCase : Optional[int] = num_stages def _snake_case ( self: str ): __lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[str] = None if self.use_labels: __lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self: str ): return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def _snake_case ( self: List[Any] ): return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowercase__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowercase__ , loss_ignore_index=255 , num_labels=self.num_labels , ) def _snake_case ( self: Any , a: List[str] , a: Any , a: Any ): __lowerCamelCase : Tuple = UperNetForSemanticSegmentation(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowerCamelCase : str = model(lowercase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _snake_case ( self: Union[str, Any] ): __lowerCamelCase : str = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Optional[Any] = config_and_inputs 
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' __snake_case = (UperNetForSemanticSegmentation,) if is_torch_available() else () __snake_case = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} __snake_case = False __snake_case = False __snake_case = False __snake_case = False __snake_case = False __snake_case = False def _snake_case ( self: Union[str, Any] ): __lowerCamelCase : List[str] = UperNetModelTester(self ) __lowerCamelCase : List[Any] = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 ) def _snake_case ( self: Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self: Optional[int] ): return def _snake_case ( self: Optional[Any] ): __lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : int = model_class(lowercase__ ) __lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[Any] = [*signature.parameters.keys()] __lowerCamelCase : str = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase__ ) def _snake_case ( self: Optional[Any] ): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__ ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def _snake_case ( self: int ): pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def _snake_case ( self: str ): pass @unittest.skip(reason='UperNet does not have a base model' ) def _snake_case ( self: Optional[Any] ): pass @unittest.skip(reason='UperNet does not have a base model' ) def _snake_case ( self: Union[str, Any] ): pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _snake_case ( self: Any ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _snake_case ( self: List[Any] ): pass def _snake_case ( self: Any ): def check_hidden_states_output(a: List[str] , a: int , a: List[str] ): __lowerCamelCase : Tuple = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): __lowerCamelCase : Dict = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) __lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase : Any = self.model_tester.num_stages self.assertEqual(len(lowercase__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Union[str, Any] = True check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ) def _snake_case ( self: Dict ): __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Optional[int] = _config_zero_init(lowercase__ ) __lowerCamelCase : Tuple = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: __lowerCamelCase : Tuple = model_class(config=lowercase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip(reason='UperNet does not have tied weights' ) def _snake_case ( self: Optional[Any] ): pass @slow def _snake_case ( self: Optional[Any] ): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : int = UperNetForSemanticSegmentation.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) def UpperCamelCase__ ( ): __lowerCamelCase : List[str] = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) __lowerCamelCase : str = Image.open(A__ ).convert('RGB' ) return image @require_torch @require_vision @slow class A_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self: Optional[Any] ): __lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) __lowerCamelCase : Any = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(lowercase__ ) __lowerCamelCase : Union[str, Any] = prepare_img() __lowerCamelCase : Any = processor(images=lowercase__ , return_tensors='pt' ).to(lowercase__ ) with torch.no_grad(): __lowerCamelCase : Tuple = model(**lowercase__ ) __lowerCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , lowercase__ ) __lowerCamelCase : Tuple = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4 ) ) def _snake_case ( self: Tuple ): __lowerCamelCase : Optional[int] = 
AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) __lowerCamelCase : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(lowercase__ ) __lowerCamelCase : Tuple = prepare_img() __lowerCamelCase : Tuple = processor(images=lowercase__ , return_tensors='pt' ).to(lowercase__ ) with torch.no_grad(): __lowerCamelCase : int = model(**lowercase__ ) __lowerCamelCase : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , lowercase__ ) __lowerCamelCase : List[Any] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1e-4 ) )
358
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series.

    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
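# Usage sketch (illustrative values, not part of the original module):
# >>> resistor_parallel([10.0, 20.0])   # 1 / (1/10 + 1/20), approximately 6.667
# >>> resistor_series([10.0, 20.0])     # 30.0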
194
0
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } SCREAMING_SNAKE_CASE__ = {"allegro/herbert-base-cased": 514} SCREAMING_SNAKE_CASE__ = {} class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = HerbertTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="</s>" , **lowercase , ) -> List[Any]: super().__init__( lowercase , lowercase , tokenizer_file=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , sep_token=lowercase , **lowercase , ) def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) if token_ids_a is None: return [1] + ([0] * len(lowercase )) + [1] return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1] def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: lowerCAmelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
46
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy matching-based 2-approximation of a minimum vertex cover."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edges of an adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
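# Usage sketch: the returned cover depends on the order in which set.pop()
# yields edges, so check the covering property rather than an exact set:
# >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# >>> cover = matching_min_vertex_cover(graph)
# >>> all(u in cover or v in cover for (u, v) in get_edges(graph))
# True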
50
0
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): a : Optional[Any] = """pt""" elif is_tf_available(): a : Union[str, Any] = """tf""" else: a : Any = """jax""" class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = PerceiverTokenizer __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() lowercase__ : str= PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase_ ( self ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" ) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ): '''simple docstring''' lowercase__ : Union[str, Any]= [] for i in range(len(snake_case__ ) ): try: lowercase__ : Any= tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase__ : int= list(filter(lambda snake_case__ : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case__ ) ) lowercase__ : Union[str, Any]= list(filter(lambda snake_case__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case__ ) , snake_case__ ) ) if max_length is not None and len(snake_case__ ) > max_length: lowercase__ : int= toks[:max_length] if min_length is not None and len(snake_case__ ) < min_length and len(snake_case__ ) > 0: while len(snake_case__ ) < min_length: lowercase__ : List[str]= toks + toks # toks_str = [t[1] for t in toks] lowercase__ : str= [t[0] for t in toks] # Ensure consistency lowercase__ : Optional[Any]= tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ ) if " " not in output_txt and len(snake_case__ ) > 1: lowercase__ : Dict= ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case__ ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case__ ) ) if with_prefix_space: lowercase__ : List[Any]= " " + output_txt lowercase__ : Any= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) return output_txt, output_ids def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.perceiver_tokenizer lowercase__ : Union[str, Any]= "Unicode €." 
lowercase__ : Tuple= tokenizer(snake_case__ ) lowercase__ : Dict= [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"] , snake_case__ ) # decoding lowercase__ : List[str]= tokenizer.decode(snake_case__ ) self.assertEqual(snake_case__ , "[CLS]Unicode €.[SEP]" ) lowercase__ : List[str]= tokenizer("e è é ê ë" ) lowercase__ : int= [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"] , snake_case__ ) # decoding lowercase__ : Any= tokenizer.decode(snake_case__ ) self.assertEqual(snake_case__ , "[CLS]e è é ê ë[SEP]" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.perceiver_tokenizer lowercase__ : Tuple= ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowercase__ : List[Any]= [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on lowercase__ : Dict= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) if FRAMEWORK != "jax": lowercase__ : List[Any]= list(batch.input_ids.numpy()[0] ) else: lowercase__ : str= list(batch.input_ids.tolist()[0] ) self.assertListEqual(snake_case__ , snake_case__ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.perceiver_tokenizer lowercase__ : int= ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ : Union[str, Any]= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids" , snake_case__ ) self.assertIn("attention_mask" , snake_case__ ) self.assertNotIn("decoder_input_ids" , snake_case__ ) self.assertNotIn("decoder_attention_mask" , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.perceiver_tokenizer lowercase__ : int= [ "Summary of the text.", "Another summary.", ] lowercase__ : List[Any]= tokenizer( text_target=snake_case__ , max_length=32 , padding="max_length" , truncation=snake_case__ , return_tensors=snake_case__ ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowercase__ : List[str]= self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ : str= tempfile.mkdtemp() lowercase__ : str= " He is very happy, UNwant\u00E9d,running" lowercase__ : int= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) tokenizer.save_pretrained(snake_case__ ) lowercase__ : List[str]= tokenizer.__class__.from_pretrained(snake_case__ ) lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) shutil.rmtree(snake_case__ 
) lowercase__ : List[str]= self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase__ : Tuple= tempfile.mkdtemp() lowercase__ : List[Any]= " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowercase__ : Tuple= tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowercase__ : List[str]= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) tokenizer.save_pretrained(snake_case__ ) lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ ) lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(snake_case__ ) with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: lowercase__ : Optional[Any]= json.load(snake_case__ ) with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: lowercase__ : Optional[Any]= json.load(snake_case__ ) lowercase__ : List[str]= [F'''<extra_id_{i}>''' for i in range(125 )] lowercase__ : Optional[int]= added_tokens_extra_ids + [ "an_additional_special_token" ] lowercase__ : Union[str, Any]= added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case__ , snake_case__ ) with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case__ , snake_case__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase__ : Optional[int]= tokenizer_class.from_pretrained( snake_case__ , ) self.assertIn( "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase__ : int= added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case__ )] lowercase__ : int= tokenizer_class.from_pretrained( snake_case__ , additional_special_tokens=snake_case__ , ) self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , "�" ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase__ : Optional[int]= ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] lowercase__ : Optional[int]= tokenizer.convert_tokens_to_string(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ )
370
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__() lowercase__ : Any= nn.Linear(3 , 4 ) lowercase__ : Tuple= nn.BatchNormad(4 ) lowercase__ : Dict= nn.Linear(4 , 5 ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' return output + 1 class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= ModelForTest() lowercase__ : str= ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) self.assertEqual(test_model._hf_hook , snake_case__ ) self.assertTrue(hasattr(snake_case__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) ) self.assertFalse(hasattr(snake_case__ , "_old_forward" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ModelForTest() lowercase__ : int= ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) add_hook_to_module(snake_case__ , snake_case__ , append=snake_case__ ) self.assertEqual(isinstance(test_model._hf_hook , snake_case__ ) , snake_case__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(snake_case__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) ) self.assertFalse(hasattr(snake_case__ , "_old_forward" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() lowercase__ : int= torch.randn(2 , 3 ) lowercase__ : Optional[Any]= test_model(x + 1 ) lowercase__ : Tuple= test_model(x + 2 ) lowercase__ : str= PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Tuple= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowercase__ : Tuple= PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Optional[Any]= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowercase__ : List[str]= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) assert 
torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ModelForTest() lowercase__ : Optional[int]= torch.randn(2 , 3 ) lowercase__ : Optional[int]= test_model(snake_case__ ) lowercase__ : str= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Optional[int]= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowercase__ : Tuple= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowercase__ : Optional[Any]= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : List[str]= test_model(snake_case__ ) assert torch.allclose(snake_case__ , output + 2 , atol=1e-5 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ModelForTest() lowercase__ : Optional[Any]= torch.randn(2 , 3 ) lowercase__ : int= test_model(snake_case__ ) lowercase__ : Union[str, Any]= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowercase__ : Any= True lowercase__ : Optional[int]= test_model(snake_case__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowercase__ : int= torch.randn(2 , 3 ) lowercase__ : List[str]= model(snake_case__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(snake_case__ , AlignDevicesHook(io_same_device=snake_case__ ) ) lowercase__ : Tuple= torch.randn(2 , 3 ).to(0 ) lowercase__ : Optional[Any]= model(snake_case__ ) self.assertEqual(output.device , torch.device(0 ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : Optional[int]= {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True} add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Optional[int]= torch.device(hook_kwargs["execution_device"] ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : List[Any]= torch.randn(2 , 3 ) lowercase__ : str= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload lowercase__ : Optional[int]= { "execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True, "offload_buffers": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : str= torch.randn(2 , 3 ) lowercase__ : str= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : str= 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Dict= torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : Optional[Any]= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ , offload_buffers=snake_case__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : List[str]= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : Optional[Any]= 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Tuple= torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : str= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() , offload_buffers=snake_case__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : Dict= torch.randn(2 , 3 ) lowercase__ : List[str]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
150
0
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
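# Usage sketch:
# >>> prime_sieve(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]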
2
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowercase__ : Dict = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , **__lowercase : Union[str, Any] ): """simple docstring""" super().__init__(**__lowercase ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__lowercase ) def snake_case__ ( self : Optional[int] , **__lowercase : Optional[Any] ): """simple docstring""" snake_case_ = {} snake_case_ = {} snake_case_ = {} # preprocess args if "points_per_batch" in kwargs: snake_case_ = kwargs["points_per_batch"] if "points_per_crop" in kwargs: snake_case_ = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: snake_case_ = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: snake_case_ = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: snake_case_ = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: snake_case_ = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: snake_case_ = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: snake_case_ = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: snake_case_ = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: snake_case_ = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: snake_case_ = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: snake_case_ = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Optional[int] , __lowercase : List[str] , *__lowercase : Optional[Any] , __lowercase : Dict=None , __lowercase : List[str]=None , **__lowercase : Optional[Any] ): """simple docstring""" return super().__call__(__lowercase , *__lowercase , num_workers=__lowercase , batch_size=__lowercase , **__lowercase ) def snake_case__ ( self : str , __lowercase : int , __lowercase : List[str]=64 , __lowercase : int = 0 , __lowercase : float = 5_12 / 15_00 , __lowercase : Optional[int] = 32 , __lowercase : Optional[int] = 1 , ): """simple docstring""" snake_case_ = load_image(__lowercase ) snake_case_ = self.image_processor.size["longest_edge"] snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.generate_crop_boxes( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) snake_case_ = self.image_processor(images=__lowercase , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": snake_case_ = self.get_inference_context() with inference_context(): snake_case_ = self._ensure_tensor_on_device(__lowercase , device=self.device ) snake_case_ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) snake_case_ = image_embeddings snake_case_ = grid_points.shape[1] snake_case_ = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
" "To return all points at once, set points_per_batch to None" ) for i in range(0 , __lowercase , __lowercase ): snake_case_ = grid_points[:, i : i + points_per_batch, :, :] snake_case_ = input_labels[:, i : i + points_per_batch] snake_case_ = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def snake_case__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Union[str, Any]=0.88 , __lowercase : Union[str, Any]=0.95 , __lowercase : int=0 , __lowercase : int=1 , ): """simple docstring""" snake_case_ = model_inputs.pop("input_boxes" ) snake_case_ = model_inputs.pop("is_last" ) snake_case_ = model_inputs.pop("original_sizes" ).tolist() snake_case_ = model_inputs.pop("reshaped_input_sizes" ).tolist() snake_case_ = self.model(**__lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks snake_case_ = model_outputs["pred_masks"] snake_case_ = self.image_processor.post_process_masks( __lowercase , __lowercase , __lowercase , __lowercase , binarize=__lowercase ) snake_case_ = model_outputs["iou_scores"] snake_case_ , snake_case_ , snake_case_ = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowercase , __lowercase , __lowercase , __lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def snake_case__ ( self : str , __lowercase : Any , __lowercase : Optional[int]=False , __lowercase : int=False , __lowercase : List[str]=0.7 , ): """simple docstring""" snake_case_ = [] snake_case_ = [] snake_case_ = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) snake_case_ = torch.cat(__lowercase ) snake_case_ = torch.cat(__lowercase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.image_processor.post_process_for_mask_generation( __lowercase , __lowercase , __lowercase , __lowercase ) snake_case_ = defaultdict(__lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(__lowercase ) snake_case_ = {} if output_rle_mask: snake_case_ = rle_mask if output_bboxes_mask: snake_case_ = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
187
0
import math


def is_prime(number: int) -> bool:
    """Trial division using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`."""
    j = 3  # current side length
    primes = 3  # 3, 5 and 7 are the primes among the first ring's corners
    while primes / (2 * j - 1) >= ratio:
        # Corners of the next ring (side length j + 2), excluding the square (j + 2) ** 2
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
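# Usage sketch (the small case is hand-checked; the default-ratio value is the
# commonly reported Project Euler problem 58 answer, quoted here, not re-verified):
# >>> solution(0.5)
# 11
# >>> solution()   # ratio=0.1, expected 26241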
367
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase 
, '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
9
0
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def lowerCamelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Optional[int] = list(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): __UpperCAmelCase : Optional[int] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def lowerCamelCase ( _UpperCamelCase : Exception ) -> bool: '''simple docstring''' __UpperCAmelCase : List[str] = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def lowerCamelCase ( _UpperCamelCase : callable = None , _UpperCamelCase : int = 1_2_8 ) -> int: '''simple docstring''' if function is None: return functools.partial(_UpperCamelCase , starting_batch_size=_UpperCamelCase ) __UpperCAmelCase : List[str] = starting_batch_size def decorator(*_UpperCamelCase : str , **_UpperCamelCase : Any ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() __UpperCAmelCase : int = list(inspect.signature(_UpperCamelCase ).parameters.keys() ) # Guard against user error if len(_UpperCamelCase ) < (len(_UpperCamelCase ) + 1): __UpperCAmelCase : str = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) except Exception as e: if should_reduce_batch_size(_UpperCamelCase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
115
"""simple docstring""" import numpy as np UpperCAmelCase : Optional[Any] = [ ['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'k'], ['l', 'm', 'n', 'o', 'p'], ['q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z'], ] class lowerCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = np.array(UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = np.where(letter == self.SQUARE ) __UpperCAmelCase : Optional[Any] = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCamelCase__ ( self : Any , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Tuple = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : str = message.lower() __UpperCAmelCase : List[Any] = message.replace(""" """ , """""" ) __UpperCAmelCase : List[Any] = message.replace("""j""" , """i""" ) __UpperCAmelCase : Optional[int] = np.empty((2, len(UpperCamelCase )) ) for letter_index in range(len(UpperCamelCase ) ): __UpperCAmelCase : List[Any] = self.letter_to_numbers(message[letter_index] ) __UpperCAmelCase : str = numbers[0] __UpperCAmelCase : int = numbers[1] __UpperCAmelCase : Union[str, Any] = first_step.reshape(2 * len(UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = """""" for numbers_index in range(len(UpperCamelCase ) ): __UpperCAmelCase : Any = int(second_step[numbers_index * 2] ) __UpperCAmelCase : Any = int(second_step[(numbers_index * 2) + 1] ) __UpperCAmelCase : str = self.numbers_to_letter(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = encoded_message + letter return encoded_message def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Any = message.lower() message.replace(""" """ , """""" ) __UpperCAmelCase : int = np.empty(2 * len(UpperCamelCase ) ) for letter_index in range(len(UpperCamelCase ) ): __UpperCAmelCase : Any = self.letter_to_numbers(message[letter_index] ) __UpperCAmelCase : Any = numbers[0] __UpperCAmelCase : Dict = numbers[1] __UpperCAmelCase : str = first_step.reshape((2, len(UpperCamelCase )) ) __UpperCAmelCase : Union[str, Any] = """""" for numbers_index in range(len(UpperCamelCase ) ): __UpperCAmelCase : Optional[int] = int(second_step[0, numbers_index] ) __UpperCAmelCase : Tuple = int(second_step[1, numbers_index] ) __UpperCAmelCase : Tuple = self.numbers_to_letter(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = decoded_message + letter return decoded_message
115
1
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in x) starting from the point `a`."""
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n)
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of log(x) - 1 = 0)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
370
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own arguments plus everything destined for the
    training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
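# Invocation sketch (hypothetical script name and flags):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --arg1 --arg2
#
# The launched script is expected to expose an `_mp_fn(index)` entry point,
# which xmp.spawn calls once per TPU core with the process index.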
36
0
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) _UpperCamelCase: Union[str, Any] = { 'sample_size': 3_2, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1_0_0_0, 'block_out_channels': [3_2, 6_4], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } _UpperCamelCase: Union[str, Any] = { 'sample_size': 6_4, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1_0_0_0, 'block_out_channels': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], 'attention_head_dim': 6_4, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } _UpperCamelCase: int = { 'sample_size': 2_5_6, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], 'attention_head_dim': 6_4, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } _UpperCamelCase: Optional[Any] = { 'num_train_timesteps': 4_0, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } _UpperCamelCase: Any = { 'num_train_timesteps': 2_0_1, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } _UpperCamelCase: Dict = { 'num_train_timesteps': 1_5_1, 'sigma_min': 0.0_0_2, 'sigma_max': 8_0.0, } def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: '''simple docstring''' lowercase : Dict = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] lowercase : Tuple = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] lowercase : Optional[Any] = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] lowercase : Union[str, Any] = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] lowercase : List[Any] = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] lowercase : str = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] lowercase : Optional[Any] = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] lowercase : Dict = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] lowercase : Dict = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] lowercase : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: lowercase : Any = checkpoint[f'''{old_prefix}.skip_connection.weight'''] lowercase : Tuple = checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ) -> Dict: '''simple docstring''' lowercase , lowercase , lowercase : Dict = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) lowercase , lowercase , lowercase : int = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) lowercase : int = checkpoint[f'''{old_prefix}.norm.weight'''] lowercase : int = checkpoint[f'''{old_prefix}.norm.bias'''] lowercase : Dict = weight_q.squeeze(-1 ).squeeze(-1 ) lowercase : Optional[int] = bias_q.squeeze(-1 ).squeeze(-1 ) lowercase : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) lowercase : int = bias_k.squeeze(-1 ).squeeze(-1 ) lowercase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) lowercase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) lowercase : Tuple = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) lowercase : str = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str: '''simple docstring''' lowercase : Optional[Any] = torch.load(_UpperCAmelCase , map_location='cpu' ) lowercase : str = {} lowercase : List[str] = checkpoint['time_embed.0.weight'] lowercase : Dict = checkpoint['time_embed.0.bias'] lowercase : Optional[Any] = checkpoint['time_embed.2.weight'] lowercase : int = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: lowercase : Dict = checkpoint['label_emb.weight'] lowercase : List[Any] = checkpoint['input_blocks.0.0.weight'] lowercase : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] lowercase : Optional[int] = unet_config['down_block_types'] lowercase : Dict = unet_config['layers_per_block'] lowercase : Tuple = unet_config['attention_head_dim'] lowercase : Union[str, Any] = unet_config['block_out_channels'] lowercase : str = 1 lowercase : str = channels_list[0] for i, layer_type in enumerate(_UpperCAmelCase ): lowercase : Dict = channels_list[i] lowercase : Optional[int] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_UpperCAmelCase ): lowercase : Union[str, Any] = f'''down_blocks.{i}.resnets.{j}''' lowercase : Optional[int] = f'''input_blocks.{current_layer}.0''' lowercase : Dict = True if j == 0 and downsample_block_has_skip else False lowercase : Tuple = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , has_skip=_UpperCAmelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_UpperCAmelCase ): lowercase : List[Any] = f'''down_blocks.{i}.resnets.{j}''' lowercase : str = f'''input_blocks.{current_layer}.0''' lowercase : Optional[Any] = True if j == 0 and downsample_block_has_skip else False lowercase : List[Any] = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , has_skip=_UpperCAmelCase ) lowercase : Optional[Any] = f'''down_blocks.{i}.attentions.{j}''' lowercase : Tuple = f'''input_blocks.{current_layer}.1''' lowercase : List[str] = convert_attention( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) current_layer += 1 if i != len(_UpperCAmelCase ) - 1: lowercase : Any = f'''down_blocks.{i}.downsamplers.0''' lowercase : Any = f'''input_blocks.{current_layer}.0''' lowercase : Optional[int] = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) current_layer += 1 lowercase : Tuple = current_channels # hardcoded the mid-block for now lowercase : List[str] = 'mid_block.resnets.0' 
lowercase : str = 'middle_block.0' lowercase : str = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase : List[str] = 'mid_block.attentions.0' lowercase : List[str] = 'middle_block.1' lowercase : Dict = convert_attention(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase : Any = 'mid_block.resnets.1' lowercase : List[str] = 'middle_block.2' lowercase : str = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase : Any = 0 lowercase : Optional[Any] = unet_config['up_block_types'] for i, layer_type in enumerate(_UpperCAmelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): lowercase : Dict = f'''up_blocks.{i}.resnets.{j}''' lowercase : Union[str, Any] = f'''output_blocks.{current_layer}.0''' lowercase : str = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , has_skip=_UpperCAmelCase ) current_layer += 1 if i != len(_UpperCAmelCase ) - 1: lowercase : List[str] = f'''up_blocks.{i}.upsamplers.0''' lowercase : List[Any] = f'''output_blocks.{current_layer-1}.1''' lowercase : int = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): lowercase : Optional[Any] = f'''up_blocks.{i}.resnets.{j}''' lowercase : int = f'''output_blocks.{current_layer}.0''' lowercase : int = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , has_skip=_UpperCAmelCase ) lowercase : Dict = f'''up_blocks.{i}.attentions.{j}''' lowercase : List[str] = f'''output_blocks.{current_layer}.1''' lowercase : Dict = convert_attention( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) current_layer += 1 if i != len(_UpperCAmelCase ) - 1: lowercase : int = f'''up_blocks.{i}.upsamplers.0''' lowercase : List[str] = f'''output_blocks.{current_layer-1}.2''' lowercase : Optional[Any] = convert_resnet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase : str = checkpoint['out.0.weight'] lowercase : Any = checkpoint['out.0.bias'] lowercase : Optional[int] = checkpoint['out.2.weight'] lowercase : int = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": _UpperCamelCase: int = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' 
) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') _UpperCamelCase: Optional[int] = parser.parse_args() _UpperCamelCase: List[str] = strabool(args.class_cond) _UpperCamelCase: int = os.path.basename(args.unet_path) print(f'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: _UpperCamelCase: str = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _UpperCamelCase: Any = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _UpperCamelCase: List[str] = TEST_UNET_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: _UpperCamelCase: Optional[Any] = None _UpperCamelCase: List[Any] = con_pt_to_diffuser(args.unet_path, unet_config) _UpperCamelCase: str = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _UpperCamelCase: int = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _UpperCamelCase: Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _UpperCamelCase: Any = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') _UpperCamelCase: Tuple = CMStochasticIterativeScheduler(**scheduler_config) _UpperCamelCase: Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
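# A hypothetical usage sketch of the converted checkpoint, assuming the
# script above was run with `--dump_path ./cm_converted` (the path is
# illustrative) and that the installed diffusers exposes
# ConsistencyModelPipeline.from_pretrained as in recent releases.
import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("./cm_converted")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Consistency models support single-step sampling.
image = pipe(num_inference_steps=1).images[0]
image.save("cm_sample.png")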
255
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 42 class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): @register_to_config def __init__( self : Optional[int], lowerCAmelCase : int = 32, lowerCAmelCase : int = 64, lowerCAmelCase : int = 20, lowerCAmelCase : int = 768, lowerCAmelCase : Optional[Any]=77, lowerCAmelCase : Tuple=4, lowerCAmelCase : float = 0.0, lowerCAmelCase : str = "silu", lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "linear", lowerCAmelCase : Optional[str] = "prd", lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, ) -> List[Any]: super().__init__() lowercase : List[Any] = num_attention_heads lowercase : int = attention_head_dim lowercase : List[Any] = num_attention_heads * attention_head_dim lowercase : Tuple = additional_embeddings lowercase : Dict = time_embed_dim or inner_dim lowercase : Optional[Any] = embedding_proj_dim or embedding_dim lowercase : int = clip_embed_dim or embedding_dim lowercase : List[str] = Timesteps(lowerCAmelCase, lowerCAmelCase, 0 ) lowercase : List[str] = TimestepEmbedding(lowerCAmelCase, lowerCAmelCase, out_dim=lowerCAmelCase, act_fn=lowerCAmelCase ) lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase ) if embedding_proj_norm_type is None: lowercase : str = None elif embedding_proj_norm_type == "layer": lowercase : Tuple = nn.LayerNorm(lowerCAmelCase ) else: raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' ) lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase ) if encoder_hid_proj_type is None: lowercase : Optional[int] = None elif encoder_hid_proj_type == "linear": lowercase : Dict = nn.Linear(lowerCAmelCase, lowerCAmelCase ) else: raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' ) lowercase : Dict = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCAmelCase ) ) if added_emb_type == "prd": lowercase : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCAmelCase ) ) elif added_emb_type is None: lowercase : str = None else: raise ValueError( f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' ) lowercase : Dict = nn.ModuleList( [ BasicTransformerBlock( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dropout=lowerCAmelCase, activation_fn='gelu', attention_bias=lowerCAmelCase, ) for d in range(lowerCAmelCase ) ] ) if norm_in_type == "layer": lowercase : str = nn.LayerNorm(lowerCAmelCase ) elif norm_in_type is None: lowercase : Optional[int] = None else: raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' ) lowercase : int = nn.LayerNorm(lowerCAmelCase ) lowercase : str = nn.Linear(lowerCAmelCase, lowerCAmelCase ) lowercase : Optional[Any] = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 ) causal_attention_mask.triu_(1 ) lowercase : List[str] = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask', lowerCAmelCase, persistent=lowerCAmelCase ) lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) ) lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowercase ( self : Tuple ) -> Dict[str, AttentionProcessor]: lowercase : Any = {} def fn_recursive_add_processors(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Dict[str, AttentionProcessor] ): if hasattr(lowerCAmelCase, 'set_processor' ): lowercase : List[str] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return processors def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple: lowercase : str = len(self.attn_processors.keys() ) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the''' f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Union[str, Any] ): if hasattr(lowerCAmelCase, 'set_processor' ): if not isinstance(lowerCAmelCase, lowerCAmelCase ): module.set_processor(lowerCAmelCase ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def lowercase ( self : Optional[Any] ) -> Optional[Any]: self.set_attn_processor(AttnProcessor() ) def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Union[torch.Tensor, float, int], lowerCAmelCase : torch.FloatTensor, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[torch.BoolTensor] = None, lowerCAmelCase : bool = True, ) -> List[Any]: lowercase : Optional[Any] = hidden_states.shape[0] lowercase : Union[str, Any] = timestep if not torch.is_tensor(lowerCAmelCase ): lowercase : List[str] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device ) elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0: lowercase : List[str] = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase : Optional[int] = timesteps * torch.ones(lowerCAmelCase, dtype=timesteps.dtype, device=timesteps.device ) lowercase : Dict = self.time_proj(lowerCAmelCase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype ) lowercase : Any = self.time_embedding(lowerCAmelCase ) if self.embedding_proj_norm is not None: lowercase : Any = self.embedding_proj_norm(lowerCAmelCase ) lowercase : List[str] = self.embedding_proj(lowerCAmelCase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: lowercase : str = self.encoder_hidden_states_proj(lowerCAmelCase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) lowercase : Optional[Any] = self.proj_in(lowerCAmelCase ) lowercase : Optional[int] = self.positional_embedding.to(hidden_states.dtype ) lowercase : Dict = [] lowercase : Optional[int] = 0 if encoder_hidden_states is not None: additional_embeds.append(lowerCAmelCase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: lowercase : str = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: lowercase : Union[str, Any] = hidden_states[:, None, :] lowercase : int = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase, -1, -1 ) additional_embeds.append(lowerCAmelCase ) lowercase : Union[str, Any] = torch.cat( lowerCAmelCase, dim=1, ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: lowercase : List[Any] = F.pad( lowerCAmelCase, ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ), value=0.0, ) lowercase : str = hidden_states + positional_embeddings if attention_mask is not None: lowercase : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0 lowercase : List[Any] = F.pad(lowerCAmelCase, (0, self.additional_embeddings), value=0.0 ) lowercase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 ) if self.norm_in is not None: lowercase : List[Any] = self.norm_in(lowerCAmelCase ) for block in self.transformer_blocks: lowercase : Tuple = block(lowerCAmelCase, attention_mask=lowerCAmelCase ) lowercase : Optional[Any] = self.norm_out(lowerCAmelCase ) if self.prd_embedding is not None: lowercase : Optional[Any] = hidden_states[:, -1] else: lowercase : Any = hidden_states[:, additional_embeddings_len:] lowercase : Optional[int] = self.proj_to_clip_embeddings(lowerCAmelCase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase ) def lowercase ( self : Any, lowerCAmelCase : Dict ) -> Dict: lowercase : int = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
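# A small standalone sketch of the causal attention mask built in the
# constructor above: a square matrix holding a large negative value strictly
# above the diagonal and zeros elsewhere. Added to attention logits, it
# suppresses attention to future tokens after the softmax.
import torch

num_tokens = 5
causal_attention_mask = torch.full([num_tokens, num_tokens], -10000.0)
causal_attention_mask.triu_(1)  # keep the -10000s strictly above the diagonal
print(causal_attention_mask)
# Row i is zero up to column i and -10000 afterwards, so position i can only
# attend to itself and earlier positions.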
255
1
'''simple docstring'''


def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort); valid only for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
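# A quick sanity check for the implementation above, comparing against
# Python's built-in sorted() on random input. Note bead_sort mutates its
# argument in place, hence the defensive copy.
import random

data = [random.randint(0, 100) for _ in range(20)]
assert bead_sort(list(data)) == sorted(data)
print("bead_sort matches sorted() on", data)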
48
'''simple docstring'''

import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
48
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = """cvt""" def __init__( self , lowercase_=3 , lowercase_=[7, 3, 3] , lowercase_=[4, 2, 2] , lowercase_=[2, 1, 1] , lowercase_=[64, 192, 384] , lowercase_=[1, 3, 6] , lowercase_=[1, 2, 10] , lowercase_=[4.0, 4.0, 4.0] , lowercase_=[0.0, 0.0, 0.0] , lowercase_=[0.0, 0.0, 0.0] , lowercase_=[0.0, 0.0, 0.1] , lowercase_=[True, True, True] , lowercase_=[False, False, True] , lowercase_=["dw_bn", "dw_bn", "dw_bn"] , lowercase_=[3, 3, 3] , lowercase_=[1, 1, 1] , lowercase_=[2, 2, 2] , lowercase_=[1, 1, 1] , lowercase_=[1, 1, 1] , lowercase_=0.02 , lowercase_=1E-1_2 , **lowercase_ , ): """simple docstring""" super().__init__(**lowercase_ ) UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : int = patch_sizes UpperCAmelCase_ : Dict = patch_stride UpperCAmelCase_ : Union[str, Any] = patch_padding UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : Tuple = num_heads UpperCAmelCase_ : Tuple = depth UpperCAmelCase_ : Optional[Any] = mlp_ratio UpperCAmelCase_ : List[Any] = attention_drop_rate UpperCAmelCase_ : str = drop_rate UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : Union[str, Any] = qkv_bias UpperCAmelCase_ : Union[str, Any] = cls_token UpperCAmelCase_ : Any = qkv_projection_method UpperCAmelCase_ : Optional[Any] = kernel_qkv UpperCAmelCase_ : int = padding_kv UpperCAmelCase_ : List[Any] = stride_kv UpperCAmelCase_ : Tuple = padding_q UpperCAmelCase_ : int = stride_q UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
61
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _a = 'platform' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ): if attention_mask is None: UpperCAmelCase_ : Union[str, Any] = np.where(input_ids != config.pad_token_id, 1, 0 ) if decoder_attention_mask is None: UpperCAmelCase_ : Optional[int] = np.where(decoder_input_ids != config.pad_token_id, 1, 0 ) if head_mask is None: UpperCAmelCase_ : int = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class A_ : '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=32 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=0.02 , ): """simple docstring""" UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : str = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[Any] = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Dict = num_attention_heads UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Optional[int] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : str = eos_token_id UpperCAmelCase_ : str = pad_token_id UpperCAmelCase_ : str = bos_token_id UpperCAmelCase_ : List[Any] = initializer_range def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase_ : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase_ : str = shift_tokens_right(lowercase_ , 1 , 2 ) UpperCAmelCase_ : str = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , ) UpperCAmelCase_ : Optional[int] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = 20 UpperCAmelCase_ : int = model_class_name(lowercase_ ) UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict["input_ids"] ) UpperCAmelCase_ , UpperCAmelCase_ : Any = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) UpperCAmelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) UpperCAmelCase_ : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ : int = model.decode( decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , ) UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) UpperCAmelCase_ : Dict = model.decode( decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , ) UpperCAmelCase_ : Optional[Any] = model.decode(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = 20 UpperCAmelCase_ : Any = model_class_name(lowercase_ ) UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) UpperCAmelCase_ : Optional[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase_ : int = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ ) UpperCAmelCase_ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ : List[str] = model.decode( decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , ) UpperCAmelCase_ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) UpperCAmelCase_ : Dict = model.decode( decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , 
decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , ) UpperCAmelCase_ : Dict = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ ) UpperCAmelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class A_ (unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = 99 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase_ : Any = input_ids.shape[0] UpperCAmelCase_ : Dict = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._get_config_and_data() UpperCAmelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ ) UpperCAmelCase_ : Optional[int] = lm_model(input_ids=lowercase_ ) UpperCAmelCase_ : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ ) UpperCAmelCase_ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase_ : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase_ : Tuple = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ) UpperCAmelCase_ : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase_ : Dict = shift_tokens_right(lowercase_ , 1 , 2 ) UpperCAmelCase_ : Tuple = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum() UpperCAmelCase_ : Optional[Any] = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowercase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class A_ (lowercase__ ,unittest.TestCase ,lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) SCREAMING_SNAKE_CASE__ : List[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () 
def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = FlaxBlenderbotSmallModelTester(self ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ ) UpperCAmelCase_ : Dict = model_class(lowercase_ ) @jax.jit def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ): return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ : List[Any] = encode_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ : Optional[Any] = encode_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : Optional[int] = model_class(lowercase_ ) UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) UpperCAmelCase_ : int = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowercase_ , lowercase_ , lowercase_ ): return model.decode( decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ : str = decode_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ : List[Any] = decode_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase_ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase_ : Optional[int] = model(lowercase_ ) self.assertIsNotNone(lowercase_ )
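# The tests above rely on `shift_tokens_right` to build decoder inputs. A
# minimal NumPy sketch of that behavior follows; the -100 label sentinel and
# the argument names mirror the upstream convention, but this is an
# illustration, not the library function itself.
import numpy as np


def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    # Move every token one slot to the right and prepend the start token.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Replace any -100 label sentinel that was shifted in with padding.
    return np.where(shifted == -100, pad_token_id, shifted)


ids = np.array([[71, 82, 18, 33, 2]])
print(shift_right(ids, pad_token_id=1, decoder_start_token_id=2))
# [[ 2 71 82 18 33]]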
61
1
from __future__ import annotations class a__ : def __init__( self , _A ): """simple docstring""" __lowerCAmelCase = TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " "int or float." ) if len(_A ) != 0: __lowerCAmelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(_A ) != cols: raise error for value in row: if not isinstance(_A , (int, float) ): raise error __lowerCAmelCase = rows else: __lowerCAmelCase = [] def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.rows ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.rows[0] ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return (self.num_rows, self.num_columns) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.order[0] == self.order[1] def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return bool(self.determinant() ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(_A ).determinant() def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(_A , _A ) return -1 * self.get_minor(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return Matrix( [ [self.get_minor(_A , _A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.determinant() if not determinant: raise TypeError("Only matrices with a non-zero determinant have an inverse" ) return self.adjugate() * (1 / determinant) def __repr__( self ): """simple docstring""" return str(self.rows ) def __str__( self ): """simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ "[" + ". 
".join([str(_A ) for value in row] ) + ".]" for row in self.rows ] ) + "]" ) def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = TypeError("Row must be a list containing all ints and/or floats" ) if not isinstance(_A , _A ): raise type_error for value in row: if not isinstance(_A , (int, float) ): raise type_error if len(_A ) != self.num_columns: raise ValueError( "Row must be equal in length to the other rows in the matrix" ) if position is None: self.rows.append(_A ) else: __lowerCAmelCase = self.rows[0:position] + [row] + self.rows[position:] def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = TypeError( "Column must be a list containing all ints and/or floats" ) if not isinstance(_A , _A ): raise type_error for value in column: if not isinstance(_A , (int, float) ): raise type_error if len(_A ) != self.num_rows: raise ValueError( "Column must be equal in length to the other columns in the matrix" ) if position is None: __lowerCAmelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __lowerCAmelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , _A ): """simple docstring""" if not isinstance(_A , _A ): return NotImplemented return self.rows == other.rows def __ne__( self , _A ): """simple docstring""" return not self == other def __neg__( self ): """simple docstring""" return self * -1 def __add__( self , _A ): """simple docstring""" if self.order != other.order: raise ValueError("Addition requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , _A ): """simple docstring""" if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , _A ): """simple docstring""" if isinstance(_A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(_A , _A ): if self.num_columns != other.num_rows: raise ValueError( "The number of columns in the first matrix must " "be equal to the number of rows in the second" ) return Matrix( [ [Matrix.dot_product(_A , _A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( "A Matrix can only be multiplied by an int, float, or another matrix" ) def __pow__( self , _A ): """simple docstring""" if not isinstance(_A , _A ): raise TypeError("A Matrix can only be raised to the power of an int" ) if not self.is_square: raise ValueError("Only square matrices can be raised to a power" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( "Only invertable matrices can be raised to a negative power" ) __lowerCAmelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def __SCREAMING_SNAKE_CASE( cls , _A , _A ): """simple docstring""" return sum(row[i] * column[i] for i in range(len(_A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
363
from sklearn.metrics import mean_squared_error import datasets UpperCamelCase__ = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ UpperCamelCase__ = """\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. """ UpperCamelCase__ = """ Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. \"raw_values\" : Returns a full set of errors in case of multioutput input. \"uniform_average\" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric(\"mse\") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {'mse': 0.6123724356957945} If you're using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mse': array([0.41666667, 1. ])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A="uniform_average" , _A=True ): """simple docstring""" __lowerCAmelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
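# A usage sketch matching the examples embedded in the docstring above,
# assuming the metric script is registered under the name "mse" and loaded
# through datasets.load_metric.
import datasets

mse_metric = datasets.load_metric("mse")
results = mse_metric.compute(predictions=[2.5, 0.0, 2, 8], references=[3, -0.5, 2, 7])
print(results)  # {'mse': 0.375}

# Passing squared=False returns the RMSE instead.
rmse = mse_metric.compute(predictions=[2.5, 0.0, 2, 8], references=[3, -0.5, 2, 7], squared=False)
print(rmse)  # {'mse': 0.6123724356957945}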
102
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ : int = { 'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'], 'tokenization_ctrl': ['CTRLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] = [ 'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'CTRLForSequenceClassification', 'CTRLLMHeadModel', 'CTRLModel', 'CTRLPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : int = [ 'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCTRLForSequenceClassification', 'TFCTRLLMHeadModel', 'TFCTRLModel', 'TFCTRLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : List[str] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = emb.weight.shape _UpperCAmelCase : str = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = emb.weight.data return lin_layer def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=None ): _UpperCAmelCase : int = {} for old_key in state_dict.keys(): _UpperCAmelCase : Tuple = old_key if "moe_layer.experts." in key: if expert_idx is not None: _UpperCAmelCase : Optional[int] = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" ) else: _UpperCAmelCase : Any = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: _UpperCAmelCase : List[Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: _UpperCAmelCase : Tuple = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: _UpperCAmelCase : List[Any] = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: _UpperCAmelCase : List[Any] = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: _UpperCAmelCase : Any = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: _UpperCAmelCase : int = key.replace("final_layer_norm" , "ff_layer_norm" ) _UpperCAmelCase : Tuple = state_dict[old_key] return new_dict def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = WEIGHTS_NAME ): _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : Optional[Any] = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) for expert in range(__lowerCAmelCase ): _UpperCAmelCase : Tuple = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(__lowerCAmelCase ): _UpperCAmelCase : Tuple = torch.load(__lowerCAmelCase )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Dict = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[str] = os.path.join( __lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowerCAmelCase )[0]].dtype ) # Add the last block _UpperCAmelCase : Tuple = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) _UpperCAmelCase : Union[str, Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Any = shared_weights["decoder.embed_tokens.weight"] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__lowerCAmelCase ) == 1: _UpperCAmelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowerCAmelCase , __lowerCAmelCase ) # Otherwise, let's build the index _UpperCAmelCase : Union[str, Any] = {} for idx, shard in enumerate(__lowerCAmelCase ): _UpperCAmelCase : Tuple = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin""" ) _UpperCAmelCase : List[Any] = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) for key in shard: _UpperCAmelCase : List[Any] = shard_file # Add the metadata _UpperCAmelCase : Any = {"total_size": total_size} _UpperCAmelCase : List[str] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" , encoding="utf-8" ) as f: _UpperCAmelCase : Tuple = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n" f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ ,lowerCamelCase__ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) lowerCamelCase__ = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) lowerCamelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
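# A small sketch of the index file the script above writes alongside the
# shards. The parameter names and sizes here are illustrative, but the
# layout (a metadata.total_size plus a weight_map from parameter name to
# shard file) mirrors the transformers sharding convention the code follows.
import json

index = {
    "metadata": {"total_size": 123456789},  # bytes across all shards
    "weight_map": {
        "shared.decoder.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
        "ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
    },
}
# This is the shape of what gets written to WEIGHTS_INDEX_NAME
# (pytorch_model.bin.index.json).
print(json.dumps(index, indent=2, sort_keys=True))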
234
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
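# A hypothetical usage sketch, assuming IBertConfig and IBertModel are
# importable from an installed transformers package. quant_mode=True is the
# switch that makes the model run its integer-only layers.
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True)  # enable integer-only inference mode
model = IBertModel(config)             # randomly initialized weights
print(config.quant_mode, config.force_dequant)  # True none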
342
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _snake_case = logging.get_logger(__name__) _snake_case = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class _snake_case ( _lowercase ): lowerCamelCase__: str = "detr" lowerCamelCase__: Dict = ["past_key_values"] lowerCamelCase__: str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : List[Any] = backbone_config.get("model_type" ) __UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None __UpperCAmelCase : Any = use_timm_backbone __UpperCAmelCase : Optional[Any] = backbone_config __UpperCAmelCase : Optional[Any] = num_channels __UpperCAmelCase : List[Any] = num_queries __UpperCAmelCase : Optional[int] = d_model __UpperCAmelCase : Optional[Any] = encoder_ffn_dim __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : List[Any] = encoder_attention_heads __UpperCAmelCase : int = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : int = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Dict = attention_dropout __UpperCAmelCase : Optional[Any] = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = init_xavier_std __UpperCAmelCase : int = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : Optional[Any] = auxiliary_loss __UpperCAmelCase : int = position_embedding_type __UpperCAmelCase : Optional[int] = backbone __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Dict = dilation # Hungarian matcher __UpperCAmelCase : Optional[int] = class_cost __UpperCAmelCase : Optional[Any] = bbox_cost __UpperCAmelCase : Optional[int] = giou_cost # Loss coefficients __UpperCAmelCase : Any = mask_loss_coefficient __UpperCAmelCase : Any = dice_loss_coefficient __UpperCAmelCase : Any = bbox_loss_coefficient __UpperCAmelCase : Optional[int] = giou_loss_coefficient __UpperCAmelCase : Optional[Any] = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _lowerCamelCase ( self: Dict ) -> int: return self.encoder_attention_heads @property def _lowerCamelCase ( self: str ) -> int: return self.d_model @classmethod def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]: return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: str ) -> Dict[str, any]: __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __UpperCAmelCase : int = self.backbone_config.to_dict() __UpperCAmelCase : List[str] = self.__class__.model_type return output class _snake_case ( _lowercase ): lowerCamelCase__: Optional[int] = version.parse("1.11" ) @property def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowerCamelCase ( self: Optional[Any] ) -> float: return 1e-5 @property def _lowerCamelCase ( self: List[str] ) -> int: return 12
342
1
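For context, here is a minimal sketch of how a configuration class like the one above is typically consumed (assuming the transformers package is installed and that IBertConfig/IBertModel are its public names for this model):

from transformers import IBertConfig, IBertModel

# Build a randomly initialized model from an explicit configuration.
config = IBertConfig(vocab_size=30522, hidden_size=768, quant_mode=False)
model = IBertModel(config)
print(model.config.model_type)  # "ibert"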
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
31
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    """Return the shortest path between `start` and `goal` found by breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    """Return the number of edges on the shortest path between two nodes, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
322
0
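As a quick sanity check of the BFS helpers above, a small hand-traced example (a sketch; it assumes the function names used in the snippet):

graph = {"1": ["2", "3"], "2": ["1", "4"], "3": ["1"], "4": ["2"]}
assert bfs_shortest_path(graph, "3", "4") == ["3", "1", "2", "4"]
assert bfs_shortest_path_distance(graph, "3", "4") == 3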
"""simple docstring""" import numpy as np __SCREAMING_SNAKE_CASE : Optional[int] = [ ['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'k'], ['l', 'm', 'n', 'o', 'p'], ['q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z'], ] class __A : '''simple docstring''' def __init__( self : Dict ) ->None: """simple docstring""" snake_case_ = np.array(UpperCAmelCase_ ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str ) ->np.ndarray: """simple docstring""" snake_case_ , snake_case_ = np.where(letter == self.SQUARE ) snake_case_ = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) ->str: """simple docstring""" snake_case_ = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str ) ->str: """simple docstring""" snake_case_ = message.lower() snake_case_ = message.replace(""" """ , """""" ) snake_case_ = message.replace("""j""" , """i""" ) snake_case_ = np.empty((2, len(UpperCAmelCase_ )) ) for letter_index in range(len(UpperCAmelCase_ ) ): snake_case_ = self.letter_to_numbers(message[letter_index] ) snake_case_ = numbers[0] snake_case_ = numbers[1] snake_case_ = first_step.reshape(2 * len(UpperCAmelCase_ ) ) snake_case_ = """""" for numbers_index in range(len(UpperCAmelCase_ ) ): snake_case_ = int(second_step[numbers_index * 2] ) snake_case_ = int(second_step[(numbers_index * 2) + 1] ) snake_case_ = self.numbers_to_letter(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = encoded_message + letter return encoded_message def lowerCAmelCase ( self : str , UpperCAmelCase_ : str ) ->str: """simple docstring""" snake_case_ = message.lower() message.replace(""" """ , """""" ) snake_case_ = np.empty(2 * len(UpperCAmelCase_ ) ) for letter_index in range(len(UpperCAmelCase_ ) ): snake_case_ = self.letter_to_numbers(message[letter_index] ) snake_case_ = numbers[0] snake_case_ = numbers[1] snake_case_ = first_step.reshape((2, len(UpperCAmelCase_ )) ) snake_case_ = """""" for numbers_index in range(len(UpperCAmelCase_ ) ): snake_case_ = int(second_step[0, numbers_index] ) snake_case_ = int(second_step[1, numbers_index] ) snake_case_ = self.numbers_to_letter(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = decoded_message + letter return decoded_message
371
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = """mobilenet_v1""" def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : List[Any]=1.0 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : int="relu6" , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=0.999 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[Any]=0.001 , **UpperCAmelCase_ : Any , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if depth_multiplier <= 0: raise ValueError("""depth_multiplier must be greater than zero.""" ) snake_case_ = num_channels snake_case_ = image_size snake_case_ = depth_multiplier snake_case_ = min_depth snake_case_ = hidden_act snake_case_ = tf_padding snake_case_ = classifier_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps class __A (snake_case__): '''simple docstring''' __lowercase: int = version.parse("""1.11""") @property def lowerCAmelCase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict([("""pixel_values""", {0: """batch"""})] ) @property def lowerCAmelCase ( self : int ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "image-classification": return OrderedDict([("""logits""", {0: """batch"""})] ) else: return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] ) @property def lowerCAmelCase ( self : int ) ->float: """simple docstring""" return 1E-4
233
0
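A short round-trip check for the cipher above (a sketch assuming the BifidCipher class name; note that encode folds "j" into "i", so only messages without "j" survive the round trip unchanged):

cipher = BifidCipher()
ciphertext = cipher.encode("testmessage")
assert cipher.decode(ciphertext) == "testmessage"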
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=224 , UpperCAmelCase=1000 , UpperCAmelCase=[3, 3, 6, 4] , UpperCAmelCase=[48, 56, 112, 220] , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = num_labels _snake_case = image_size _snake_case = layer_depths _snake_case = embed_dims def lowercase (self ) -> Any: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase (self ) -> int: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase , layer_scale_init_value=1e-5 , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: _snake_case = SwiftFormerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: _snake_case = self.num_labels _snake_case = SwiftFormerForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) _snake_case = SwiftFormerForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase (self ) -> Dict: ((_snake_case), (_snake_case), (_snake_case)) = self.prepare_config_and_inputs() _snake_case = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () 
lowerCAmelCase_ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> Union[str, Any]: _snake_case = SwiftFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def lowercase (self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def lowercase (self ) -> List[str]: pass def lowercase (self ) -> Any: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) ) def lowercase (self ) -> int: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def lowercase (self ) -> Union[str, Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) @slow def lowercase (self ) -> List[str]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = SwiftFormerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def lowercase (self ) -> Optional[Any]: pass def lowercase (self ) -> Optional[int]: def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.hidden_states _snake_case = 8 self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Dict: def _config_zero_init(UpperCAmelCase ): _snake_case = copy.deepcopy(UpperCAmelCase ) for key in 
configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCAmelCase , UpperCAmelCase , 1e-1_0 ) if isinstance(getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ): _snake_case = _config_zero_init(getattr(UpperCAmelCase , UpperCAmelCase ) ) setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return configs_no_init _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = _config_zero_init(UpperCAmelCase ) for model_class in self.all_model_classes: _snake_case = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( ): _snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase (self ) -> Dict: return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def lowercase (self ) -> Any: _snake_case = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(UpperCAmelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # verify the logits _snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
341
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtraction (e.g. "IV" == 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1
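The Roman-numeral helpers above invert each other; a quick worked example using the names from the snippet:

assert int_to_roman(1994) == "MCMXCIV"  # 1000 + 900 + 90 + 4
assert roman_to_int("MCMXCIV") == 1994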
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase ,_lowercase ,_lowercase : str = False, False, False @dataclass class _UpperCAmelCase : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) a__ : str = field(default="Audio" , init=_lowerCAmelCase , repr=_lowerCAmelCase ) def __call__( self : Optional[int] ): return self.pa_type def a ( self : Any , _lowercase : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(_lowercase , _lowercase ): return {"bytes": None, "path": value} elif isinstance(_lowercase , _lowercase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __UpperCAmelCase = BytesIO() sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __UpperCAmelCase = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: __UpperCAmelCase = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 __UpperCAmelCase = BytesIO(bytes() ) sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def a ( self : Dict , _lowercase : dict , _lowercase : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) __UpperCAmelCase , __UpperCAmelCase = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err __UpperCAmelCase = xsplitext(_lowercase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: __UpperCAmelCase = token_per_repo_id or {} __UpperCAmelCase = path.split('''::''' )[-1] try: __UpperCAmelCase = string_to_dict(_lowercase , config.HUB_DATASETS_URL )['''repo_id'''] __UpperCAmelCase = token_per_repo_id[repo_id] except (ValueError, KeyError): __UpperCAmelCase = None with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase ) as f: __UpperCAmelCase , __UpperCAmelCase = sf.read(_lowercase ) else: __UpperCAmelCase , __UpperCAmelCase = sf.read(_lowercase ) __UpperCAmelCase = array.T if self.mono: __UpperCAmelCase = librosa.to_mono(_lowercase ) if self.sampling_rate and self.sampling_rate != sampling_rate: __UpperCAmelCase = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate ) __UpperCAmelCase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def a ( self : int ): from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def a ( self : Tuple , _lowercase : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): __UpperCAmelCase = pa.array([None] * len(_lowercase ) , type=pa.binary() ) __UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __UpperCAmelCase = pa.array([None] * len(_lowercase ) , type=pa.string() ) __UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): __UpperCAmelCase = pa.array([Audio().encode_example(_lowercase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: __UpperCAmelCase = storage.field('''bytes''' ) else: __UpperCAmelCase = pa.array([None] * len(_lowercase ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: __UpperCAmelCase = storage.field('''path''' ) else: __UpperCAmelCase = pa.array([None] * len(_lowercase ) , type=pa.string() ) __UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(_lowercase , self.pa_type ) def a ( self : Tuple , _lowercase : 
pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(_lowercase : int ): with xopen(_lowercase , '''rb''' ) as f: __UpperCAmelCase = f.read() return bytes_ __UpperCAmelCase = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __UpperCAmelCase = pa.array( [os.path.basename(_lowercase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) __UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(_lowercase , self.pa_type )
86
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _lowercase : List[Any] = logging.get_logger(__name__) @add_end_docstrings(_lowerCAmelCase ) class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : str , *_lowercase : Tuple , **_lowercase : List[Any] ): super().__init__(*_lowercase , **_lowercase ) self.check_model_type(_lowercase ) def a ( self : int , _lowercase : Dict=None , _lowercase : List[Any]=None , _lowercase : int=None , **_lowercase : Dict ): __UpperCAmelCase , __UpperCAmelCase = {}, {} if padding is not None: __UpperCAmelCase = padding if truncation is not None: __UpperCAmelCase = truncation if top_k is not None: __UpperCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self : List[str] , _lowercase : Union["Image.Image", str] , _lowercase : str = None , **_lowercase : Optional[Any] ): if isinstance(_lowercase , (Image.Image, str) ) and isinstance(_lowercase , _lowercase ): __UpperCAmelCase = {'''image''': image, '''question''': question} else: __UpperCAmelCase = image __UpperCAmelCase = super().__call__(_lowercase , **_lowercase ) return results def a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Any=False , _lowercase : Union[str, Any]=False ): __UpperCAmelCase = load_image(inputs['''image'''] ) __UpperCAmelCase = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=_lowercase , truncation=_lowercase ) __UpperCAmelCase = self.image_processor(images=_lowercase , return_tensors=self.framework ) model_inputs.update(_lowercase ) return model_inputs def a ( self : Optional[Any] , _lowercase : str ): __UpperCAmelCase = self.model(**_lowercase ) return model_outputs def a ( self : str , _lowercase : Optional[int] , _lowercase : Any=5 ): if top_k > self.model.config.num_labels: __UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": __UpperCAmelCase = model_outputs.logits.sigmoid()[0] __UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) __UpperCAmelCase = scores.tolist() __UpperCAmelCase = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_lowercase , _lowercase )]
86
1
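A minimal usage sketch for the visual-question-answering pipeline above (it assumes torch, Pillow, and a default Hub checkpoint are available; the image path is a hypothetical placeholder):

from transformers import pipeline

vqa = pipeline("visual-question-answering")
# preds = vqa(image="cats.png", question="How many cats are there?")  # "cats.png" is a placeholder path
# each prediction is a dict of the form {"score": float, "answer": str}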
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCamelCase_ : List[Any] = logging.getLogger(__name__) def _A ( ): """simple docstring""" a =argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=lowercase , default=10_00 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=lowercase , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) a =parser.parse_args() return args def _A ( lowercase ): """simple docstring""" def fn(lowercase ): return tokenizer(examples['''text'''] ) return fn def _A ( lowercase ): """simple docstring""" a =[] for i in range(len(tokenized_data['''input_ids'''] ) ): a ={ '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } a =tf.train.Features(feature=lowercase ) a =tf.train.Example(features=lowercase ) a =example.SerializeToString() records.append(lowercase ) return records def _A ( lowercase ): """simple docstring""" a =datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: a =min(len(lowercase ) , args.limit ) a =dataset.select(range(lowercase ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) a =AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a =os.path.join(args.output_dir , args.split ) if not os.path.exists(lowercase ): os.makedirs(lowercase ) else: a =os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. a =tokenize_function(lowercase ) a =dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. 
To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowercase ): # Concatenate all texts. a ={k: sum(examples[k] , [] ) for k in examples.keys()} a =len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a =(total_length // args.max_length) * args.max_length # Split by chunks of max_len. a ={ k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )] for k, t in concatenated_examples.items() } return result a =dataset_tokenized.map(lowercase , batched=lowercase , batch_size=10_00 , num_proc=4 ) a =0 a =0 for shard in range(0 , len(lowercase ) , args.shard_size ): a =grouped_dataset[shard : shard + args.shard_size] a =len(dataset_snapshot['''input_ids'''] ) a =os.path.join(lowercase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) a =get_serialized_examples(lowercase ) with tf.io.TFRecordWriter(lowercase ) as out_file: for i in range(len(lowercase ) ): a =serialized_examples[i] out_file.write(lowercase ) print('''Wrote file {} containing {} records'''.format(lowercase , lowercase ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=lowercase ) if __name__ == "__main__": lowerCamelCase_ : Dict = parse_args() main(args)
81
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list `nums`, return the indices of two numbers that add up to `target`."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
201
0
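The two-pointer scan above assumes nums is sorted in ascending order; two quick checks:

assert two_pointer([1, 3, 5, 8], 11) == [1, 3]  # nums[1] + nums[3] == 11
assert two_pointer([1, 2, 3], 100) == []        # no pair sums to the target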
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
353
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
101
0
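A minimal sketch of instantiating the ViT-MSN configuration above (assuming transformers exposes ViTMSNConfig and ViTMSNModel and that torch is installed):

from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(image_size=224, patch_size=16, num_channels=3)
model = ViTMSNModel(config)  # randomly initialized weights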
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
229
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
49
0
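For reference, a hedged sketch of calling the conversion function above directly from Python; all paths are hypothetical placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="checkpoints/bigbird/model.ckpt",    # hypothetical TF checkpoint
    big_bird_config_file="checkpoints/bigbird/config.json",  # hypothetical config file
    pytorch_dump_path="converted/",                          # output directory for the PyTorch weights
    is_trivia_qa=False,
)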
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE_ = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), } } SCREAMING_SNAKE_CASE_ = { """google/bigbird-roberta-base""": 4_0_9_6, """google/bigbird-roberta-large""": 4_0_9_6, """google/bigbird-base-trivia-itc""": 4_0_9_6, } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = VOCAB_FILES_NAMES __snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : str = ["input_ids", "attention_mask"] __snake_case : List[int] = [] def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[str]="<pad>" ,lowerCamelCase__ : Tuple="[SEP]" ,lowerCamelCase__ : Dict="[MASK]" ,lowerCamelCase__ : List[Any]="[CLS]" ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,**lowerCamelCase__ : Union[str, Any] ,) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase__ ,) SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str: '''simple docstring''' return self.sp_model.get_piece_size() def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Any ,lowerCamelCase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCamelCase__ ,out_type=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[Any] ) -> Dict: '''simple docstring''' return self.sp_model.piece_to_id(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(lowerCamelCase__ ) return token def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = """""" SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase__ ) + token SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(lowerCamelCase__ ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = True ,**lowerCamelCase__ : Any ,) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = [] sub_texts.append(lowerCamelCase__ ) else: current_sub_text.append(lowerCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: SCREAMING_SNAKE_CASE = re.sub(R""" (\[(MASK|SEP)\])""" ,R"""\1""" ,""" """.join(lowerCamelCase__ ) ) else: SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE = self.clean_up_tokenization(lowerCamelCase__ ) return clean_text else: return text def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase__ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ ,"""wb""" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
193
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
193
1
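A short usage sketch for the SentencePiece-backed tokenizer above (it assumes transformers, sentencepiece, and network access to the Hub; the checkpoint name comes from the snippet's own vocab map):

from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Hello world").input_ids
print(tokenizer.decode(ids))  # special tokens are included unless skip_special_tokens=True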
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV. Field names mirror the pandas.read_csv keywords collected below."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
36
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# torch.hub reads this list to check the environment before loading any entry point.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
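# This layout is what `torch.hub` expects from a repository's `hubconf.py`: a `dependencies`
# list plus one callable per entry point. A hedged usage sketch; the checkpoint name is an
# assumption and the call needs network access on first run.
import torch

tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
bert = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
print(bert(**tok("Hello world", return_tensors="pt")).last_hidden_state.shape)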
146
0
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" return " ".join( ''.join(word[::-1] ) if len(UpperCamelCase__ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("Hey wollef sroirraw"))
367
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ) -> int: A__ = tempfile.mkdtemp() A__ = BlipImageProcessor() A__ = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) A__ = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) A__ = InstructBlipProcessor(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def snake_case__ ( self ,**__UpperCAmelCase ) -> str: return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).tokenizer def snake_case__ ( self ,**__UpperCAmelCase ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).image_processor def snake_case__ ( self ,**__UpperCAmelCase ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).qformer_tokenizer def snake_case__ ( self ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def snake_case__ ( self ) -> str: A__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def snake_case__ ( self ) -> Any: A__ = InstructBlipProcessor( tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) A__ = self.get_image_processor(do_normalize=__UpperCAmelCase ,padding_value=1.0 ) A__ = InstructBlipProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCAmelCase ) self.assertIsInstance(processor.qformer_tokenizer ,__UpperCAmelCase ) def snake_case__ ( self ) -> str: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = self.get_qformer_tokenizer() A__ = InstructBlipProcessor( tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__UpperCAmelCase ,return_tensors='np' ) A__ = processor(images=__UpperCAmelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def snake_case__ ( self ) -> Tuple: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = self.get_qformer_tokenizer() A__ = InstructBlipProcessor( tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase ) A__ = 'lower newer' A__ = processor(text=__UpperCAmelCase ) A__ = tokenizer(__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase ) A__ = qformer_tokenizer(__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase ) for key in encoded_tokens.keys(): 
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor['qformer_' + key] ) def snake_case__ ( self ) -> str: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = self.get_qformer_tokenizer() A__ = InstructBlipProcessor( tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase ) A__ = 'lower newer' A__ = self.prepare_image_inputs() A__ = processor(text=__UpperCAmelCase ,images=__UpperCAmelCase ) self.assertListEqual( list(inputs.keys() ) ,['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] ,) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def snake_case__ ( self ) -> Tuple: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = self.get_qformer_tokenizer() A__ = InstructBlipProcessor( tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__UpperCAmelCase ) A__ = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> Any: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = self.get_qformer_tokenizer() A__ = InstructBlipProcessor( tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase ) A__ = 'lower newer' A__ = self.prepare_image_inputs() A__ = processor(text=__UpperCAmelCase ,images=__UpperCAmelCase ) self.assertListEqual( list(inputs.keys() ) ,['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] ,)
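# For orientation, this is roughly how the processor under test is used end to end; the
# checkpoint name and the blank image are placeholders. The processor fans text out to both
# the language tokenizer and the Q-Former tokenizer and images to the image processor, which
# is why `qformer_input_ids` appears next to the usual keys.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")  # assumed checkpoint
inputs = processor(images=Image.new("RGB", (224, 224)), text="What is in this image?", return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']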
154
0
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # BloomTokenizerFast has no slow counterpart, so only the fast vocab map is checked.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
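# A minimal round trip with the fast tokenizer exercised above (downloads the
# `bigscience/tokenizer` files on first use); the expected ids come from the test's fixture.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok("The quick brown fox</s>")["input_ids"]
print(ids)              # [2175, 23714, 73173, 144252, 2]
print(tok.decode(ids))  # 'The quick brown fox</s>'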
209
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """
    Compute the Adler-32 checksum: two running sums modulo 65521,
    packed with b in the high 16 bits and a in the low 16 bits.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
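# A quick sanity check against the standard library, which implements the same algorithm in
# `zlib.adler32` (same initial value of 1). The hex constant is the well-known Adler-32 of
# "Wikipedia".
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398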
209
1
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = BlenderbotSmallTokenizer __UpperCAmelCase : Dict = False def _lowercase ( self : Any ): super().setUp() __lowercase = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] __lowercase = dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__ ) ) ) ) __lowercase = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] __lowercase = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} __lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + "\n" ) with open(self.merges_file, "w", encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase__ ) ) def _lowercase ( self : List[str], **UpperCAmelCase__ : int ): kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase__ ) def _lowercase ( self : int, UpperCAmelCase__ : Dict ): __lowercase = "adapt act apte" __lowercase = "adapt act apte" return input_text, output_text def _lowercase ( self : Optional[Any] ): __lowercase = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) __lowercase = "adapt act apte" __lowercase = ["adapt", "act", "ap@@", "te"] __lowercase = tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] __lowercase = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ), UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert tok("sam" ).input_ids == [1_3_8_4] __lowercase = "I am a small frog." __lowercase = tok([src_text], padding=UpperCAmelCase__, truncation=UpperCAmelCase__ )["input_ids"] __lowercase = tok.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__, clean_up_tokenization_spaces=UpperCAmelCase__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _lowercase ( self : List[Any] ): __lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) __lowercase = "I am a small frog ." __lowercase = "." __lowercase = tok(UpperCAmelCase__ )["input_ids"] __lowercase = tok(UpperCAmelCase__ )["input_ids"] assert encoded[-1] == encoded_dot[0]
144
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray class _lowerCAmelCase ( nn.Module ): """simple docstring""" __UpperCAmelCase : int __UpperCAmelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) __UpperCAmelCase : jnp.dtype = jnp.floataa def _lowercase ( self : Union[str, Any] ): __lowercase = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) __lowercase = [] for i in range(len(self.block_out_channels ) - 1 ): __lowercase = self.block_out_channels[i] __lowercase = self.block_out_channels[i + 1] __lowercase = nn.Conv( UpperCAmelCase__, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(UpperCAmelCase__ ) __lowercase = nn.Conv( UpperCAmelCase__, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(UpperCAmelCase__ ) __lowercase = blocks __lowercase = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : Any, UpperCAmelCase__ : Union[str, Any] ): __lowercase = self.conv_in(UpperCAmelCase__ ) __lowercase = nn.silu(UpperCAmelCase__ ) for block in self.blocks: __lowercase = block(UpperCAmelCase__ ) __lowercase = nn.silu(UpperCAmelCase__ ) __lowercase = self.conv_out(UpperCAmelCase__ ) return embedding @flax_register_to_config class _lowerCAmelCase ( nn.Module ,lowercase ,lowercase ): """simple docstring""" __UpperCAmelCase : int = 3_2 __UpperCAmelCase : int = 4 __UpperCAmelCase : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __UpperCAmelCase : Union[bool, Tuple[bool]] = False __UpperCAmelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) __UpperCAmelCase : int = 2 __UpperCAmelCase : Union[int, Tuple[int]] = 8 __UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None __UpperCAmelCase : int = 1_2_8_0 __UpperCAmelCase : float = 0.0 __UpperCAmelCase : bool = False __UpperCAmelCase : jnp.dtype = jnp.floataa __UpperCAmelCase : bool = True __UpperCAmelCase : int = 0 __UpperCAmelCase : str = "rgb" __UpperCAmelCase : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def _lowercase ( self : List[Any], UpperCAmelCase__ : jax.random.KeyArray ): # init input tensors __lowercase = (1, self.in_channels, self.sample_size, self.sample_size) __lowercase = jnp.zeros(UpperCAmelCase__, dtype=jnp.floataa ) __lowercase = jnp.ones((1,), dtype=jnp.intaa ) __lowercase = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa ) __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8) __lowercase = jnp.zeros(UpperCAmelCase__, dtype=jnp.floataa ) __lowercase ,__lowercase = jax.random.split(UpperCAmelCase__ ) __lowercase = {"params": params_rng, "dropout": dropout_rng} return self.init(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )["params"] def _lowercase ( 
self : Union[str, Any] ): __lowercase = self.block_out_channels __lowercase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __lowercase = self.num_attention_heads or self.attention_head_dim # input __lowercase = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time __lowercase = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) __lowercase = FlaxTimestepEmbedding(UpperCAmelCase__, dtype=self.dtype ) __lowercase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) __lowercase = self.only_cross_attention if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = (num_attention_heads,) * len(self.down_block_types ) # down __lowercase = [] __lowercase = [] __lowercase = block_out_channels[0] __lowercase = nn.Conv( UpperCAmelCase__, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(UpperCAmelCase__ ) for i, down_block_type in enumerate(self.down_block_types ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(UpperCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowercase = FlaxCrossAttnDownBlockaD( in_channels=UpperCAmelCase__, out_channels=UpperCAmelCase__, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: __lowercase = FlaxDownBlockaD( in_channels=UpperCAmelCase__, out_channels=UpperCAmelCase__, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(UpperCAmelCase__ ) for _ in range(self.layers_per_block ): __lowercase = nn.Conv( UpperCAmelCase__, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(UpperCAmelCase__ ) if not is_final_block: __lowercase = nn.Conv( UpperCAmelCase__, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(UpperCAmelCase__ ) __lowercase = down_blocks __lowercase = controlnet_down_blocks # mid __lowercase = block_out_channels[-1] __lowercase = FlaxUNetMidBlockaDCrossAttn( in_channels=UpperCAmelCase__, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) __lowercase = nn.Conv( UpperCAmelCase__, 
kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self : str, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str], UpperCAmelCase__ : float = 1.0, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : bool = False, ): __lowercase = self.controlnet_conditioning_channel_order if channel_order == "bgr": __lowercase = jnp.flip(UpperCAmelCase__, axis=1 ) # 1. time if not isinstance(UpperCAmelCase__, jnp.ndarray ): __lowercase = jnp.array([timesteps], dtype=jnp.intaa ) elif isinstance(UpperCAmelCase__, jnp.ndarray ) and len(timesteps.shape ) == 0: __lowercase = timesteps.astype(dtype=jnp.floataa ) __lowercase = jnp.expand_dims(UpperCAmelCase__, 0 ) __lowercase = self.time_proj(UpperCAmelCase__ ) __lowercase = self.time_embedding(UpperCAmelCase__ ) # 2. pre-process __lowercase = jnp.transpose(UpperCAmelCase__, (0, 2, 3, 1) ) __lowercase = self.conv_in(UpperCAmelCase__ ) __lowercase = jnp.transpose(UpperCAmelCase__, (0, 2, 3, 1) ) __lowercase = self.controlnet_cond_embedding(UpperCAmelCase__ ) sample += controlnet_cond # 3. down __lowercase = (sample,) for down_block in self.down_blocks: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase = down_block(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, deterministic=not train ) else: __lowercase ,__lowercase = down_block(UpperCAmelCase__, UpperCAmelCase__, deterministic=not train ) down_block_res_samples += res_samples # 4. mid __lowercase = self.mid_block(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, deterministic=not train ) # 5. contronet blocks __lowercase = () for down_block_res_sample, controlnet_block in zip(UpperCAmelCase__, self.controlnet_down_blocks ): __lowercase = controlnet_block(UpperCAmelCase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __lowercase = controlnet_down_block_res_samples __lowercase = self.controlnet_mid_block(UpperCAmelCase__ ) # 6. scaling __lowercase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=UpperCAmelCase__, mid_block_res_sample=UpperCAmelCase__ )
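# One detail worth calling out in the ControlNet cell above: every projection appended to
# `controlnet_down_blocks`, and `controlnet_mid_block` itself, is built with
# `kernel_init=nn.initializers.zeros_init()` and zero bias, so at initialization the control
# branch contributes exactly nothing and training starts from the frozen base model's
# behavior. A minimal Flax sketch of such a "zero convolution" (illustrative, not the class
# above):
import jax
import jax.numpy as jnp
import flax.linen as nn


class ZeroConv(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        # 1x1 conv whose kernel and bias start at zero: the output is 0 until trained
        return nn.Conv(
            self.features,
            kernel_size=(1, 1),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
        )(x)


x = jnp.ones((1, 8, 8, 4))  # NHWC, as Flax convolutions expect
params = ZeroConv(features=4).init(jax.random.PRNGKey(0), x)
assert jnp.all(ZeroConv(features=4).apply(params, x) == 0.0)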
144
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
277
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a_ :List[Any] = logging.get_logger(__name__) a_ :List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } a_ :List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase_ (A : Dict ): snake_case__ : Optional[Any] = {} with open(A , 'r' ) as file: for line_number, line in enumerate(A ): snake_case__ : Dict = line.strip() if line: snake_case__ : int = line.split() snake_case__ : List[str] = line_number snake_case__ : Dict = words[0] snake_case__ : Optional[Any] = value return result def lowercase_ (A : int , A : int , A : Optional[int] , A : Optional[Any] , A : Tuple ): for attribute in key.split('.' ): snake_case__ : Optional[int] = getattr(A , A ) snake_case__ : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case__ : Dict = 'param' if weight_type is not None and weight_type != "param": snake_case__ : Union[str, Any] = getattr(A , A ).shape elif weight_type is not None and weight_type == "param": snake_case__ : Optional[int] = hf_pointer for attribute in hf_param_name.split('.' ): snake_case__ : Optional[Any] = getattr(A , A ) snake_case__ : Dict = shape_pointer.shape # let's reduce dimension snake_case__ : List[Any] = value[0] else: snake_case__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : Any = value elif weight_type == "weight_g": snake_case__ : List[Any] = value elif weight_type == "weight_v": snake_case__ : Any = value elif weight_type == "bias": snake_case__ : List[Any] = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): snake_case__ : int = getattr(A , A ) snake_case__ : Optional[int] = value else: snake_case__ : Optional[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase_ (A : Tuple , A : List[Any] , A : int , A : str , A : Tuple ): snake_case__ : Optional[int] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case__ : str = 'param' if weight_type is not None and weight_type != "param": snake_case__ : int = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case__ : Any = '.'.join([key, hf_param_name] ) else: snake_case__ : Dict = key snake_case__ : List[str] = value if 'lm_head' in full_key else value[0] a_ :List[str] = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowercase_ (A : str , A : Optional[Any] , A : Optional[Any]=None , A : List[str]=None ): snake_case__ : Optional[int] = False for key, mapped_key in MAPPING.items(): snake_case__ : Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case__ : Optional[int] = True if "*" in mapped_key: snake_case__ : List[Any] = name.split(A )[0].split('.' )[-2] snake_case__ : Union[str, Any] = mapped_key.replace('*' , A ) if "weight_g" in name: snake_case__ : Tuple = 'weight_g' elif "weight_v" in name: snake_case__ : List[str] = 'weight_v' elif "bias" in name: snake_case__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ : Optional[int] = 'weight' else: snake_case__ : str = None if hf_dict is not None: rename_dict(A , A , A , A , A ) else: set_recursively(A , A , A , A , A ) return is_used return is_used def lowercase_ (A : Optional[Any] , A : Dict , A : Optional[int] ): snake_case__ : Dict = [] snake_case__ : Tuple = fairseq_model.state_dict() snake_case__ : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case__ : str = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) snake_case__ : Any = True else: snake_case__ : Dict = load_wavaveca_layer(A , A , A ) if not is_used: unused_weights.append(A ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase_ (A : Dict , A : Optional[Any] , A : Tuple , A : str , A : List[str] ): snake_case__ : List[Any] = full_name.split('conv_layers.' )[-1] snake_case__ : List[str] = name.split('.' 
) snake_case__ : List[Any] = int(items[0] ) snake_case__ : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : str = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) @torch.no_grad() def lowercase_ (A : Union[str, Any] , A : str , A : Tuple=None , A : List[str]=None , A : Any=True , A : Optional[int]=False ): if config_path is not None: snake_case__ : List[Any] = WavaVecaConfig.from_pretrained(A ) else: snake_case__ : List[Any] = WavaVecaConfig() if is_seq_class: snake_case__ : Dict = read_txt_into_dict(A ) snake_case__ : Any = idalabel snake_case__ : Union[str, Any] = WavaVecaForSequenceClassification(A ) snake_case__ : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) feature_extractor.save_pretrained(A ) elif is_finetuned: if dict_path: snake_case__ : str = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case__ : List[str] = target_dict.pad_index snake_case__ : Optional[int] = target_dict.bos_index snake_case__ : Optional[int] = target_dict.eos_index snake_case__ : List[Any] = len(target_dict.symbols ) snake_case__ : str = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) snake_case__ : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched snake_case__ : Optional[Any] = 0 snake_case__ : Union[str, Any] = 1 with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A , A ) snake_case__ : List[Any] = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) snake_case__ : str = True if config.feat_extract_norm == 'layer' else False snake_case__ : Optional[Any] = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) snake_case__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) snake_case__ : str = WavaVecaForCTC(A ) else: snake_case__ : int = WavaVecaForPreTraining(A ) if is_finetuned or is_seq_class: snake_case__ , snake_case__ , snake_case__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case__ : Tuple = argparse.Namespace(task='audio_pretraining' ) snake_case__ : str = fairseq.tasks.setup_task(A ) snake_case__ , snake_case__ , snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A ) snake_case__ : List[Any] = model[0].eval() recursively_load_weights(A , A , not is_finetuned ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": a_ :List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) a_ :str = parser.parse_args() a_ :Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
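# The conversion script above leans on one small trick: walking a dotted key like
# "encoder.layers.0.attention.k_proj" down a module tree with repeated `getattr` before
# assigning the tensor. A standalone sketch of that pattern; the module and key here are
# illustrative.
import torch
from torch import nn


def set_by_dotted_key(root: nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    *path, leaf = dotted_key.split(".")
    obj = root
    for attr in path:
        # nn.Module.__getattr__ resolves numeric child names like "0" via its _modules dict
        obj = getattr(obj, attr)
    getattr(obj, leaf).data.copy_(value)  # write into the existing Parameter in place


seq = nn.Sequential(nn.Linear(2, 2))
set_by_dotted_key(seq, "0.weight", torch.zeros(2, 2))
assert torch.all(seq[0].weight == 0)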
277
1
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _lowerCamelCase : Any = '''base_with_context''' def a_ ( __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Any: _snake_case = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _snake_case = ly_weight['attention'] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def a_ ( __lowercase : Union[str, Any] , __lowercase : int ) -> List[Any]: _snake_case = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case = ly_weight['attention'] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def a_ ( __lowercase : Dict , __lowercase : Tuple ) -> Dict: _snake_case = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowercase ) _snake_case = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case 
= nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) _snake_case = ly_weight['self_attention'] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = ly_weight['MultiHeadDotProductAttention_0'] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def a_ ( __lowercase : Union[str, Any] ) -> Tuple: _snake_case = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _snake_case = jnp.tree_util.tree_map(onp.array , __lowercase ) _snake_case = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] _snake_case = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) _snake_case = inference.parse_training_gin_file(__lowercase , __lowercase ) _snake_case = inference.InferenceModel(args.checkpoint_path , __lowercase ) _snake_case = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) _snake_case = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _snake_case = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _snake_case = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _snake_case = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __lowercase ) _snake_case = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __lowercase ) _snake_case = load_decoder(ta_checkpoint['target']['decoder'] , __lowercase ) _snake_case = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) _snake_case = SpectrogramDiffusionPipeline( notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F'{MODEL}/checkpoint_500000', type=str, required=False, help='''Path to the original jax model checkpoint.''', ) _lowerCamelCase : Dict = parser.parse_args() main(args)
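# Most of the `torch.FloatTensor(weights[...]['kernel'].T)` lines above exist because
# Flax/T5X stores dense kernels as (in_features, out_features) while `torch.nn.Linear.weight`
# is (out_features, in_features); the transpose reconciles the two layouts. A tiny check of
# the convention, with illustrative shapes:
import numpy as np
import torch
from torch import nn

kernel = np.random.randn(16, 32).astype(np.float32)  # Flax-style (in, out)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(kernel.T))  # torch wants (out, in)

x = np.random.randn(1, 16).astype(np.float32)
np.testing.assert_allclose(x @ kernel, linear(torch.from_numpy(x)).detach().numpy(), rtol=1e-5)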
130
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase : List[Any] = logging.get_logger(__name__) # General docstring _lowerCamelCase : Dict = '''PoolFormerConfig''' # Base docstring _lowerCamelCase : int = '''sail/poolformer_s12''' _lowerCamelCase : Optional[Any] = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase : Optional[int] = '''sail/poolformer_s12''' _lowerCamelCase : List[Any] = '''tabby, tabby cat''' _lowerCamelCase : List[str] = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def a_ ( __lowercase : List[Any] , __lowercase : float = 0.0 , __lowercase : bool = False ) -> Optional[int]: if drop_prob == 0.0 or not training: return input _snake_case = 1 - drop_prob _snake_case = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _snake_case = keep_prob + torch.rand(__lowercase , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _snake_case = input.div(__lowercase ) * random_tensor return output class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase : Optional[float] = None ): '''simple docstring''' super().__init__() _snake_case = drop_prob def A ( self : Any , lowercase : torch.Tensor ): '''simple docstring''' return drop_path(lowercase , self.drop_prob , self.training ) def A ( self : Tuple ): '''simple docstring''' return "p={}".format(self.drop_prob ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : int , lowercase : Optional[Any] , lowercase : str=None ): '''simple docstring''' super().__init__() _snake_case = patch_size if isinstance(lowercase , collections.abc.Iterable ) else (patch_size, patch_size) _snake_case = stride if isinstance(lowercase , collections.abc.Iterable ) else (stride, stride) _snake_case = padding if isinstance(lowercase , collections.abc.Iterable ) else (padding, padding) _snake_case = nn.Convad(lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase ) _snake_case = norm_layer(lowercase ) if norm_layer else nn.Identity() def A ( self : int , lowercase : Union[str, Any] ): '''simple docstring''' _snake_case = self.projection(lowercase ) _snake_case = self.norm(lowercase ) return embeddings class SCREAMING_SNAKE_CASE__ ( nn.GroupNorm ): '''simple docstring''' def __init__( self : Dict , lowercase : List[Any] , **lowercase : str ): '''simple docstring''' super().__init__(1 , lowercase , **lowercase ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : int , lowercase : List[Any] ): '''simple docstring''' super().__init__() _snake_case = nn.AvgPoolad(lowercase , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase ) def A ( self : int , lowercase : List[str] ): '''simple docstring''' return self.pool(lowercase ) - hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): 
'''simple docstring''' def __init__( self : Dict , lowercase : Tuple , lowercase : str , lowercase : Optional[Any] , lowercase : Union[str, Any] ): '''simple docstring''' super().__init__() _snake_case = nn.Convad(lowercase , lowercase , 1 ) _snake_case = nn.Convad(lowercase , lowercase , 1 ) _snake_case = PoolFormerDropPath(lowercase ) if isinstance(config.hidden_act , lowercase ): _snake_case = ACTaFN[config.hidden_act] else: _snake_case = config.hidden_act def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' _snake_case = self.conva(lowercase ) _snake_case = self.act_fn(lowercase ) _snake_case = self.drop(lowercase ) _snake_case = self.conva(lowercase ) _snake_case = self.drop(lowercase ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : str , lowercase : Tuple , lowercase : int , lowercase : str , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict ): '''simple docstring''' super().__init__() _snake_case = PoolFormerPooling(lowercase ) _snake_case = PoolFormerOutput(lowercase , lowercase , lowercase , lowercase ) _snake_case = PoolFormerGroupNorm(lowercase ) _snake_case = PoolFormerGroupNorm(lowercase ) # Useful for training neural nets _snake_case = PoolFormerDropPath(lowercase ) if drop_path > 0.0 else nn.Identity() _snake_case = config.use_layer_scale if config.use_layer_scale: _snake_case = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase ) _snake_case = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase ) def A ( self : Optional[int] , lowercase : Union[str, Any] ): '''simple docstring''' if self.use_layer_scale: _snake_case = self.pooling(self.before_norm(lowercase ) ) _snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _snake_case = hidden_states + self.drop_path(lowercase ) _snake_case = () _snake_case = self.output(self.after_norm(lowercase ) ) _snake_case = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _snake_case = hidden_states + self.drop_path(lowercase ) _snake_case = (output,) + outputs return outputs else: _snake_case = self.drop_path(self.pooling(self.before_norm(lowercase ) ) ) # First residual connection _snake_case = pooling_output + hidden_states _snake_case = () # Second residual connection inside the PoolFormerOutput block _snake_case = self.drop_path(self.output(self.after_norm(lowercase ) ) ) _snake_case = hidden_states + layer_output _snake_case = (output,) + outputs return outputs class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase : Optional[int] ): '''simple docstring''' super().__init__() _snake_case = config # stochastic depth decay rule _snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings _snake_case = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) _snake_case = nn.ModuleList(lowercase ) # Transformer blocks _snake_case = [] _snake_case = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers _snake_case = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] 
): layers.append( PoolFormerLayer( lowercase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(lowercase ) ) _snake_case = nn.ModuleList(lowercase ) def A ( self : Any , lowercase : List[str] , lowercase : str=False , lowercase : Tuple=True ): '''simple docstring''' _snake_case = () if output_hidden_states else None _snake_case = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): _snake_case , _snake_case = layers # Get patch embeddings from hidden_states _snake_case = embedding_layer(lowercase ) # Send the embeddings through the blocks for _, blk in enumerate(lowercase ): _snake_case = blk(lowercase ) _snake_case = layer_outputs[0] if output_hidden_states: _snake_case = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _UpperCAmelCase : List[Any] = PoolFormerConfig _UpperCAmelCase : Optional[int] = "poolformer" _UpperCAmelCase : str = "pixel_values" _UpperCAmelCase : int = True def A ( self : Tuple , lowercase : str ): '''simple docstring''' if isinstance(lowercase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowercase , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def A ( self : Optional[Any] , lowercase : str , lowercase : Dict=False ): '''simple docstring''' if isinstance(lowercase , lowercase ): _snake_case = value _lowerCamelCase : Optional[Any] = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _lowerCamelCase : Tuple = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
,UpperCAmelCase ,) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def __init__( self : str , lowercase : List[Any] ): '''simple docstring''' super().__init__(lowercase ) _snake_case = config _snake_case = PoolFormerEncoder(lowercase ) # Initialize weights and apply final processing self.post_init() def A ( self : List[str] ): '''simple docstring''' return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A ( self : Tuple , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ): '''simple docstring''' _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) _snake_case = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , ) _snake_case = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=lowercase , hidden_states=encoder_outputs.hidden_states , ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , lowercase : Union[str, Any] ): '''simple docstring''' super().__init__() _snake_case = nn.Linear(config.hidden_size , config.hidden_size ) def A ( self : Optional[Any] , lowercase : Optional[int] ): '''simple docstring''' _snake_case = self.dense(lowercase ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " ,UpperCAmelCase ,) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def __init__( self : str , lowercase : Any ): '''simple docstring''' super().__init__(lowercase ) _snake_case = config.num_labels _snake_case = PoolFormerModel(lowercase ) # Final norm _snake_case = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _snake_case = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A ( self : Union[str, Any] , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , ): '''simple docstring''' _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.poolformer( lowercase , output_hidden_states=lowercase , return_dict=lowercase , ) _snake_case = outputs[0] _snake_case = self.classifier(self.norm(lowercase ).mean([-2, -1] ) ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if 
self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowercase , lowercase ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowercase , lowercase ) if not return_dict: _snake_case = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
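# A minimal inference sketch for the classification model defined above, using the
# public transformers API rather than the classes in this file. The checkpoint id
# "sail/poolformer_s12" is an assumption (a small PoolFormer checkpoint on the Hub);
# swap in any PoolFormer checkpoint you actually use.
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")  # assumed checkpoint id
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

image = Image.new("RGB", (224, 224))            # stand-in for a real input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits             # shape: (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])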
130
1
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument UpperCAmelCase : int = { '''/attention/''': '''/0/SelfAttention/''', '''/self_attention/''': '''/0/SelfAttention/''', '''/encoder_decoder_attention/''': '''/1/EncDecAttention/''', '''value''': '''v''', '''query''': '''q''', '''key''': '''k''', '''out''': '''o''', '''pre_self_attention_layer_norm''': '''0/layer_norm''', '''pre_cross_attention_layer_norm''': '''1/layer_norm''', '''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong '''token_embedder''': '''shared''', '''encoder_norm''': '''final_layer_norm''', '''decoder_norm''': '''final_layer_norm''', '''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''', '''router/router_weights/w/''': '''router/classifier/''', '''roer/roer_weights/w/''': '''router/classifier/''', '''logits_dense''': '''lm_head''', } def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]: # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model __A : int = list(s_dict.keys() ) for key in keys: __A : Union[str, Any] = r'.*/layers_(\d+)' __A : Dict = key if re.match(a , a ): __A : List[str] = re.sub(r'layers_(\d+)' , r'block/\1/layer' , a ) __A : List[Any] = r'(encoder|decoder)\/' if re.match(a , a ): __A : Tuple = re.match(a , a ).groups() if groups[0] == "encoder": __A : Union[str, Any] = re.sub(r'/mlp/' , r'/1/mlp/' , a ) __A : Any = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , a ) elif groups[0] == "decoder": __A : Optional[int] = re.sub(r'/mlp/' , r'/2/mlp/' , a ) __A : Optional[int] = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , a ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __A : str = new_key.replace(a , a ) print(F"""{key} -> {new_key}""" ) __A : Dict = s_dict.pop(a ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __A : Any = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __A : List[Any] = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __A : Optional[int] = s_dict[key].shape[0] __A : Tuple = s_dict[key] for idx in range(a ): s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx] print(F"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}""" ) s_dict.pop(key ) return s_dict UpperCAmelCase : Tuple = { '''NUM_ENCODER_LAYERS''': '''num_layers''', '''NUM_DECODER_LAYERS''': '''num_decoder_layers''', '''NUM_HEADS''': '''num_heads''', '''HEAD_DIM''': '''d_kv''', '''EMBED_DIM''': '''d_model''', '''MLP_DIM''': '''d_ff''', '''NUM_SELECTED_EXPERTS''': '''num_selected_experts''', '''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''', '''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''', '''dense.MlpBlock.activations''': '''feed_forward_proj''', } def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]: # Convert a google style config to the hugging face format import regex as re with open(a , 'r' ) as f: __A : List[str] = f.read() __A : int = re.findall(r'(.*) = ([0-9.]*)' , a ) __A : Optional[Any] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __A : str = float(a ) if '.' in value else int(a ) __A : Any = re.findall(r'(.*activations) = \(\'(.*)\',\)' , a )[0] __A : List[Any] = str(activation[1] ) __A : List[str] = num_experts __A : Union[str, Any] = SwitchTransformersConfig(**a ) return config def _SCREAMING_SNAKE_CASE ( a , a , a=None , a="./" , a=8 ) -> List[Any]: # Initialise PyTorch model print(F"""Loading flax weights from : {flax_checkpoint_path}""" ) __A : List[Any] = checkpoints.load_tax_checkpoint(a ) if gin_file is not None: __A : str = convert_gin_to_config(a , a ) else: __A : int = SwitchTransformersConfig.from_pretrained(a ) __A : Union[str, Any] = SwitchTransformersForConditionalGeneration(a ) __A : Optional[Any] = flax_params['target'] __A : Optional[int] = flatten_dict(a , sep='/' ) __A : Union[str, Any] = rename_keys(a ) __A : Any = unflatten_dict(a , sep='/' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(a , a ) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(a ) if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the''' ''' model architecture. If not provided, a `gin_file` has to be provided.''' ), ) parser.add_argument( '''--gin_file''', default=None, type=str, required=False, help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''', ) parser.add_argument( '''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.''' ) parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''') UpperCAmelCase : Optional[int] = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
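# A standalone sketch of the layer-index remapping performed above: T5X stores
# parameters under "layers_{n}", while the HF layout nests them as "block/{n}/layer",
# with MLP sub-blocks renumbered per encoder/decoder. The toy keys below are
# illustrative, not taken from a real checkpoint.
import re

toy_keys = [
    "encoder/layers_0/attention/key/kernel",
    "decoder/layers_3/mlp/wi/kernel",
]
for key in toy_keys:
    new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    if new_key.startswith("encoder"):
        new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
    elif new_key.startswith("decoder"):
        new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
    print(f"{key} -> {new_key}")
# encoder/layers_0/attention/key/kernel -> encoder/block/0/layer/attention/key/kernel
# decoder/layers_3/mlp/wi/kernel -> decoder/block/3/layer/2/mlp/wi/kernel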
280
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _A: """simple docstring""" def __init__( self , _A = None ): if components is None: __A : int = [] __A : Tuple = list(_A ) def __len__( self ): return len(self.__components ) def __str__( self ): return "(" + ",".join(map(_A , self.__components ) ) + ")" def __add__( self , _A ): __A : Optional[int] = len(self ) if size == len(_A ): __A : Any = [self.__components[i] + other.component(_A ) for i in range(_A )] return Vector(_A ) else: raise Exception('must have the same size' ) def __sub__( self , _A ): __A : Tuple = len(self ) if size == len(_A ): __A : Union[str, Any] = [self.__components[i] - other.component(_A ) for i in range(_A )] return Vector(_A ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , _A ): ... @overload def __mul__( self , _A ): ... def __mul__( self , _A ): if isinstance(_A , (float, int) ): __A : str = [c * other for c in self.__components] return Vector(_A ) elif isinstance(_A , _A ) and len(self ) == len(_A ): __A : Union[str, Any] = len(self ) __A : Dict = [self.__components[i] * other.component(_A ) for i in range(_A )] return sum(_A ) else: # error case raise Exception('invalid operand!' ) def UpperCAmelCase_ ( self ): return Vector(self.__components ) def UpperCAmelCase_ ( self , _A ): if isinstance(_A , _A ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def UpperCAmelCase_ ( self , _A , _A ): assert -len(self.__components ) <= pos < len(self.__components ) __A : Optional[int] = value def UpperCAmelCase_ ( self ): if len(self.__components ) == 0: raise Exception('Vector is empty' ) __A : Optional[Any] = [c**2 for c in self.__components] return math.sqrt(sum(_A ) ) def UpperCAmelCase_ ( self , _A , _A = False ): __A : Optional[Any] = self * other __A : Optional[Any] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _SCREAMING_SNAKE_CASE ( a ) -> Vector: assert isinstance(a , a ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( a , a ) -> Vector: assert isinstance(a , a ) and (isinstance(a , a )) __A : Optional[Any] = [0] * dimension __A : Tuple = 1 return Vector(a ) def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector: assert ( isinstance(a , a ) and isinstance(a , a ) and (isinstance(a , (int, float) )) ) return x * scalar + y def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Vector: random.seed(a ) __A : str = [random.randint(a , a ) for _ in range(a )] return Vector(a ) class _A: """simple docstring""" def __init__( self , _A , _A , _A ): __A : Optional[Any] = matrix __A : Dict = w __A : Optional[int] = h def __str__( self ): __A : Tuple = '' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , _A ): if self.__width == other.width() and self.__height == other.height(): __A : Optional[Any] = [] for i in range(self.__height ): __A : Optional[Any] = [ self.__matrix[i][j] + other.component(_A , _A ) for j in range(self.__width ) ] matrix.append(_A ) return Matrix(_A , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' 
) def __sub__( self , _A ): if self.__width == other.width() and self.__height == other.height(): __A : Tuple = [] for i in range(self.__height ): __A : str = [ self.__matrix[i][j] - other.component(_A , _A ) for j in range(self.__width ) ] matrix.append(_A ) return Matrix(_A , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , _A ): ... @overload def __mul__( self , _A ): ... def __mul__( self , _A ): if isinstance(_A , _A ): # matrix-vector if len(_A ) == self.__width: __A : List[Any] = zero_vector(self.__height ) for i in range(self.__height ): __A : List[str] = [ self.__matrix[i][j] * other.component(_A ) for j in range(self.__width ) ] ans.change_component(_A , sum(_A ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' ) elif isinstance(_A , (int, float) ): # matrix-scalar __A : List[str] = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(_A , self.__width , self.__height ) return None def UpperCAmelCase_ ( self ): return self.__height def UpperCAmelCase_ ( self ): return self.__width def UpperCAmelCase_ ( self , _A , _A ): if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def UpperCAmelCase_ ( self , _A , _A , _A ): if 0 <= x < self.__height and 0 <= y < self.__width: __A : int = value else: raise Exception('change_component: indices out of bounds' ) def UpperCAmelCase_ ( self , _A , _A ): if self.__height != self.__width: raise Exception('Matrix is not square' ) __A : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(_A ) ): __A : Optional[int] = minor[i][:y] + minor[i][y + 1 :] return Matrix(_A , self.__width - 1 , self.__height - 1 ).determinant() def UpperCAmelCase_ ( self , _A , _A ): if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(_A , _A ) else: raise Exception('Indices out of bounds' ) def UpperCAmelCase_ ( self ): if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __A : List[str] = [ self.__matrix[0][y] * self.cofactor(0 , _A ) for y in range(self.__width ) ] return sum(_A ) def _SCREAMING_SNAKE_CASE ( a ) -> Matrix: __A : list[list[float]] = [[0] * n for _ in range(a )] return Matrix(a , a , a ) def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Matrix: random.seed(a ) __A : list[list[float]] = [ [random.randint(a , a ) for _ in range(a )] for _ in range(a ) ] return Matrix(a , a , a )
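# A self-contained numeric check of the cofactor expansion that the determinant
# method above implements, written without the classes so it runs on its own.
# The matrices are illustrative values only.
def det(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    total = 0.0
    for y in range(len(m)):
        # minor: drop row 0 and column y, then expand along row 0 with alternating signs
        minor = [row[:y] + row[y + 1 :] for row in m[1:]]
        total += (-1) ** y * m[0][y] * det(minor)
    return total

print(det([[2.0, 0.0], [0.0, 3.0]]))                               # 6.0
print(det([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 10.0]]))   # -3.0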
280
1
"""simple docstring""" # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : torch.FloatTensor __UpperCAmelCase : Optional[torch.FloatTensor] = None def __lowerCAmelCase ( lowercase : Any , lowercase : Dict=0.999 , lowercase : Dict="cosine" , ) -> Optional[Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase : Tuple ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) snake_case : List[str] = [] for i in range(lowercase ): snake_case : int = i / num_diffusion_timesteps snake_case : str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase ) / alpha_bar_fn(lowercase ) , lowercase ) ) return torch.tensor(lowercase , dtype=torch.floataa ) class _lowerCAmelCase ( snake_case_ , snake_case_ ): __UpperCAmelCase : Optional[int] = 1 @register_to_config def __init__( self , UpperCamelCase__ = 1000 , UpperCamelCase__ = 0.0001 , UpperCamelCase__ = 0.02 , UpperCamelCase__ = "linear" , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = 0 , UpperCamelCase__ = "epsilon" , UpperCamelCase__ = 1.0 , **UpperCamelCase__ , ) -> str: '''simple docstring''' if kwargs.get("set_alpha_to_one" , UpperCamelCase__ ) is not None: snake_case : str = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , UpperCamelCase__ , standard_warn=UpperCamelCase__ ) snake_case : Optional[int] = kwargs["set_alpha_to_one"] if trained_betas is not None: snake_case : List[Any] = torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case : Union[str, Any] = torch.linspace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case : str = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case : Any = betas_for_alpha_bar(UpperCamelCase__ ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) snake_case : Optional[Any] = 1.0 - self.betas snake_case : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
snake_case : List[str] = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution snake_case : Optional[Any] = 1.0 # setable values snake_case : Optional[Any] = None snake_case : Optional[int] = torch.from_numpy(np.arange(0 , UpperCamelCase__ ).copy().astype(np.intaa ) ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor: '''simple docstring''' return sample def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> str: '''simple docstring''' if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:' F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle' F' maximal {self.config.num_train_timesteps} timesteps.' ) snake_case : int = num_inference_steps snake_case : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case : List[str] = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round().copy().astype(np.intaa ) snake_case : Tuple = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ ) self.timesteps += self.config.steps_offset def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[DDIMSchedulerOutput, Tuple]: '''simple docstring''' snake_case : Dict = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process snake_case : Any = self.alphas_cumprod[timestep] snake_case : Dict = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) snake_case : str = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": snake_case : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 snake_case : Union[str, Any] = model_output elif self.config.prediction_type == "sample": snake_case : str = model_output snake_case : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": snake_case : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output snake_case : Optional[int] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or' " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: snake_case : int = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case : List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ ) def __len__( self ) -> Any: '''simple docstring''' return self.config.num_train_timesteps
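# A self-contained sketch of the "squaredcos_cap_v2" schedule computed by
# betas_for_alpha_bar above: beta_i = 1 - alpha_bar((i+1)/N) / alpha_bar(i/N),
# clipped at max_beta. The 10-step horizon is illustrative.
import math

def alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

num_steps, max_beta = 10, 0.999
betas = [
    min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
    for i in range(num_steps)
]
print([round(b, 4) for b in betas])  # small near t=0, reaching max_beta at the final step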
112
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __snake_case = data_utils.TransfoXLTokenizer __snake_case = data_utils.TransfoXLCorpus __snake_case = data_utils __snake_case = data_utils def __lowerCAmelCase ( lowercase : Optional[int] , lowercase : int , lowercase : List[Any] , lowercase : Union[str, Any] ) -> List[Any]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase , "rb" ) as fp: snake_case : int = pickle.load(lowercase , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) snake_case : int = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) snake_case : str = corpus.vocab.__dict__ torch.save(lowercase , lowercase ) snake_case : str = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , lowercase ) snake_case : Dict = pytorch_dump_folder_path + "/" + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(lowercase , lowercase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model snake_case : Union[str, Any] = os.path.abspath(lowercase ) snake_case : str = os.path.abspath(lowercase ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": snake_case : int = TransfoXLConfig() else: snake_case : Optional[int] = TransfoXLConfig.from_json_file(lowercase ) print(F'Building PyTorch model from configuration: {config}' ) snake_case : str = TransfoXLLMHeadModel(lowercase ) snake_case : str = load_tf_weights_in_transfo_xl(lowercase , lowercase , lowercase ) # Save pytorch-model snake_case : Union[str, Any] = os.path.join(lowercase , lowercase ) snake_case : Optional[Any] = os.path.join(lowercase , lowercase ) print(F'Save PyTorch model to {os.path.abspath(lowercase )}' ) torch.save(model.state_dict() , lowercase ) print(F'Save configuration file to {os.path.abspath(lowercase )}' ) with open(lowercase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) __snake_case = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
112
1
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" @property def __magic_name__ (self ) -> Optional[int]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ort.SessionOptions() SCREAMING_SNAKE_CASE__ : Union[str, Any] = False return options def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE__ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE__ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE__ : int = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : Dict = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images SCREAMING_SNAKE_CASE__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE__ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) 
SCREAMING_SNAKE_CASE__ : Optional[int] = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE__ : Dict = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ : str = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ : Any = output.images SCREAMING_SNAKE_CASE__ : int = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
25
from __future__ import annotations from typing import Generic, TypeVar lowerCamelCase_ = TypeVar('''T''') class __A( Generic[T] ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = data UpperCamelCase__ = self UpperCamelCase__ = 0 class __A( Generic[T] ): """simple docstring""" def __init__(self ): # map from node name to the node object UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # create a new set with x as its member UpperCamelCase__ = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # find the set x belongs to (with path-compression) UpperCamelCase__ = self.map[data] if elem_ref != elem_ref.parent: UpperCamelCase__ = self.find_set(elem_ref.parent.data ) return elem_ref.parent def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # helper function for union operation if nodea.rank > nodea.rank: UpperCamelCase__ = nodea else: UpperCamelCase__ = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # merge 2 disjoint sets self.link(self.find_set(SCREAMING_SNAKE_CASE_ ) , self.find_set(SCREAMING_SNAKE_CASE_ ) ) class __A( Generic[T] ): """simple docstring""" def __init__(self ): # connections: map from the node to the neighbouring nodes (with weights) UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # add a node ONLY if its not present in the graph if node not in self.connections: UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # add an edge with the given weight self.add_node(SCREAMING_SNAKE_CASE_ ) self.add_node(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = weight UpperCamelCase__ = weight def UpperCAmelCase_ (self ): UpperCamelCase__ = [] UpperCamelCase__ = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda SCREAMING_SNAKE_CASE_ : x[2] ) # creating the disjoint set UpperCamelCase__ = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(SCREAMING_SNAKE_CASE_ ) # MST generation UpperCamelCase__ = 0 UpperCamelCase__ = 0 UpperCamelCase__ = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index] index += 1 UpperCamelCase__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) disjoint_set.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return graph
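# A self-contained run of the Kruskal procedure implemented above, using a tiny
# dict-based union-find so the example stands alone. Graph and weights are illustrative.
def find(parent: dict, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving keeps the trees shallow
        x = parent[x]
    return x

edges = [("a", "b", 1), ("b", "c", 2), ("a", "c", 3), ("c", "d", 4)]
parent = {v: v for e in edges for v in e[:2]}
mst, total = [], 0
for u, v, w in sorted(edges, key=lambda e: e[2]):   # cheapest edges first
    ru, rv = find(parent, u), find(parent, v)
    if ru != rv:                                    # edge joins two components: keep it
        parent[ru] = rv
        mst.append((u, v, w))
        total += w
print(mst, total)  # [('a', 'b', 1), ('b', 'c', 2), ('c', 'd', 4)] 7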
244
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class __snake_case ( unittest.TestCase ): a__ = StableDiffusionLDMaDPipeline a__ = TEXT_TO_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_BATCH_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' torch.manual_seed(0) a__: Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) a__: Any = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0) a__: Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) a__: Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) a__: List[str] = CLIPTextModel(lowercase) a__: Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') a__: Any = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCamelCase_ ( self , lowercase , lowercase=0) -> List[str]: '''simple docstring''' if str(lowercase).startswith('mps'): a__: Tuple = torch.manual_seed(lowercase) else: a__: Optional[Any] = torch.Generator(device=lowercase).manual_seed(lowercase) a__: Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: str = 'cpu' # ensure determinism for the device-dependent torch.Generator a__: Tuple = self.get_dummy_components() a__: str = StableDiffusionLDMaDPipeline(**lowercase) a__: str = ldmad_pipe.to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: Any = self.get_dummy_inputs(lowercase) a__: Any = ldmad_pipe(**lowercase) a__: str = output.rgb, output.depth a__: Dict = rgb[0, -3:, -3:, -1] a__: List[str] = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) a__: Tuple = np.array( [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]) a__: int = np.array([103.46727, 85.812004, 87.849236]) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2 def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: str = self.get_dummy_components() a__: Any = StableDiffusionLDMaDPipeline(**lowercase) a__: Dict 
= ldmad_pipe.to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: str = self.get_dummy_inputs(lowercase) a__: int = 3 * [inputs['prompt']] # forward a__: Any = ldmad_pipe(**lowercase) a__: Any = output.rgb, output.depth a__: Dict = rgb_slice_a[0, -3:, -3:, -1] a__: List[Any] = depth_slice_a[0, -3:, -1] a__: List[Any] = self.get_dummy_inputs(lowercase) a__: int = 3 * [inputs.pop('prompt')] a__: str = ldmad_pipe.tokenizer( lowercase , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , ) a__: List[str] = text_inputs['input_ids'].to(lowercase) a__: Dict = ldmad_pipe.text_encoder(lowercase)[0] a__: List[Any] = prompt_embeds # forward a__: Optional[int] = ldmad_pipe(**lowercase) a__: List[str] = output.rgb, output.depth a__: str = rgb_slice_a[0, -3:, -3:, -1] a__: Optional[Any] = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten()).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten()).max() < 1e-4 def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator a__: str = self.get_dummy_components() a__: int = PNDMScheduler(skip_prk_steps=lowercase) a__: List[Any] = StableDiffusionLDMaDPipeline(**lowercase) a__: Tuple = ldmad_pipe.to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: Union[str, Any] = self.get_dummy_inputs(lowercase) a__: Any = 'french fries' a__: Optional[Any] = ldmad_pipe(**lowercase , negative_prompt=lowercase) a__: Any = output.rgb, output.depth a__: Dict = rgb[0, -3:, -3:, -1] a__: Union[str, Any] = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) a__: Tuple = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]) a__: Dict = np.array([107.84738, 84.62802, 89.962135]) assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2 @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0) -> Tuple: '''simple docstring''' a__: int = torch.Generator(device=lowercase).manual_seed(lowercase) a__: Optional[Any] = np.random.RandomState(lowercase).standard_normal((1, 4, 64, 64)) a__: Any = torch.from_numpy(lowercase).to(device=lowercase , dtype=lowercase) a__: str = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: List[str] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d') a__: Optional[Any] = ldmad_pipe.to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: Optional[Any] = self.get_inputs(lowercase) a__: Union[str, Any] = ldmad_pipe(**lowercase) a__: Optional[Any] = output.rgb, output.depth a__: str = rgb[0, -3:, -3:, -1].flatten() a__: str = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12) a__: Optional[Any] = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]) a__: str = np.array( 
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]) assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3 @nightly @require_torch_gpu class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0) -> Dict: '''simple docstring''' a__: Optional[Any] = torch.Generator(device=lowercase).manual_seed(lowercase) a__: str = np.random.RandomState(lowercase).standard_normal((1, 4, 64, 64)) a__: Dict = torch.from_numpy(lowercase).to(device=lowercase , dtype=lowercase) a__: str = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' a__: List[str] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: List[str] = self.get_inputs(lowercase) a__: Dict = ldmad_pipe(**lowercase) a__: Dict = output.rgb, output.depth a__: Union[str, Any] = 0.495586 a__: Union[str, Any] = 0.33795515 a__: str = 112.48518 a__: List[Any] = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3 def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Union[str, Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c').to(lowercase) ldmad_pipe.set_progress_bar_config(disable=lowercase) a__: List[str] = self.get_inputs(lowercase) a__: Union[str, Any] = ldmad_pipe(**lowercase) a__: List[Any] = output.rgb, output.depth a__: Optional[Any] = 0.4194127 a__: Optional[Any] = 0.35375586 a__: int = 0.5638502 a__: Dict = 0.34686103 assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12, 1) assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3
358
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE ) ->bool: return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def __a ( _SCREAMING_SNAKE_CASE ) ->bool: a__: Any = credit_card_number a__: Tuple = 0 a__: List[str] = len(_SCREAMING_SNAKE_CASE ) - 2 for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ): # double the value of every second digit a__: Tuple = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 a__: Optional[Any] = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __a ( _SCREAMING_SNAKE_CASE ) ->bool: a__: Optional[int] = F'{credit_card_number} is an invalid credit card number because' if not credit_card_number.isdigit(): print(F'{error_message} it has nonnumerical characters.' ) return False if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16: print(F'{error_message} of its length.' ) return False if not validate_initial_digits(_SCREAMING_SNAKE_CASE ): print(F'{error_message} of its first two digits.' ) return False if not luhn_validation(_SCREAMING_SNAKE_CASE ): print(F'{error_message} it fails the Luhn check.' ) return False print(F'{credit_card_number} is a valid credit card number.' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('4111111111111111') validate_credit_card_number('32323')
203
0
from __future__ import annotations from collections.abc import Iterator from typing import Any class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None class SCREAMING_SNAKE_CASE_ : def __init__( self : Any ): """simple docstring""" UpperCamelCase = None UpperCamelCase = None def __iter__( self : Any ): """simple docstring""" UpperCamelCase = self.head while self.head: yield node.data UpperCamelCase = node.next if node == self.head: break def __len__( self : Any ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self : Union[str, Any] ): """simple docstring""" return "->".join(str(lowerCamelCase_ ) for item in iter(self ) ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): """simple docstring""" self.insert_nth(len(self ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any ): """simple docstring""" self.insert_nth(0 , lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Any ): """simple docstring""" if index < 0 or index > len(self ): raise IndexError("""list index out of range.""" ) UpperCamelCase = Node(lowerCamelCase_ ) if self.head is None: UpperCamelCase = new_node # first node points itself UpperCamelCase = UpperCamelCase = new_node elif index == 0: # insert at head UpperCamelCase = self.head UpperCamelCase = UpperCamelCase = new_node else: UpperCamelCase = self.head for _ in range(index - 1 ): UpperCamelCase = temp.next UpperCamelCase = temp.next UpperCamelCase = new_node if index == len(self ) - 1: # insert at tail UpperCamelCase = new_node def lowerCamelCase_ ( self : Dict ): """simple docstring""" return self.delete_nth(0 ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return self.delete_nth(len(self ) - 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int = 0 ): """simple docstring""" if not 0 <= index < len(self ): raise IndexError("""list index out of range.""" ) UpperCamelCase = self.head if self.head == self.tail: # just one node UpperCamelCase = UpperCamelCase = None elif index == 0: # delete head node UpperCamelCase = self.tail.next.next UpperCamelCase = self.head.next else: UpperCamelCase = self.head for _ in range(index - 1 ): UpperCamelCase = temp.next UpperCamelCase = temp.next UpperCamelCase = temp.next.next if index == len(self ) - 1: # delete at tail UpperCamelCase = temp return delete_node.data def lowerCamelCase_ ( self : Dict ): """simple docstring""" return len(self ) == 0 def lowercase( ) -> None: '''simple docstring''' UpperCamelCase = CircularLinkedList() assert len(UpperCamelCase__ ) == 0 assert circular_linked_list.is_empty() is True assert str(UpperCamelCase__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(UpperCamelCase__ ) == i circular_linked_list.insert_nth(UpperCamelCase__ , i + 1 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert 
str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
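# A minimal standalone sketch of the wrap-around invariant the list above maintains:
# the tail's next pointer always references the head, so traversal stops when it
# returns to the starting node instead of hitting None.
class Node:
    def __init__(self, data):
        self.data, self.next = data, None

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next, c.next = b, c, a       # close the ring: tail -> head

seen, node = [], a
while True:
    seen.append(node.data)
    node = node.next
    if node is a:                      # back at the head: one full lap completed
        break
print(seen)                            # [1, 2, 3]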
343
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
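# A self-contained sketch of the lazy-import idea behind _LazyModule above, using
# the module-level __getattr__ hook from PEP 562 (Python 3.7+): the providing
# module is only imported on first attribute access. The attribute table here is
# illustrative, not the actual _LazyModule internals.
import importlib

_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute name -> providing module

def __getattr__(name):                     # invoked only for attributes not found normally
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")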
67
0
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache the module file from the repo `pretrained_model_name_or_path`,
    # or grab it directly if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision != "main":
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
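# A minimal usage sketch for the loader above, assuming the upstream diffusers
# layout where these helpers back `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)`.
# The pipeline name "clip_guided_stable_diffusion" is an illustrative community
# pipeline, not something this module guarantees to exist.
if __name__ == "__main__":
    pipeline_cls = get_class_from_dynamic_module(
        "clip_guided_stable_diffusion",  # resolved against COMMUNITY_PIPELINES_URL
        "clip_guided_stable_diffusion.py",
        class_name=None,  # None triggers find_pipeline_class on the loaded module
    )
    print(pipeline_cls.__name__)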
368
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module: torch.nn.Module):
    # Disable gradient tracking for every parameter of the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
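# A short usage sketch of the helpers above; the Linear layer is only an
# illustrative stand-in for whatever module you actually want to freeze.
if __name__ == "__main__":
    device = get_device()
    layer = torch.nn.Linear(4, 4).to(device)
    freeze_module(layer)  # gradients are now disabled for every parameter
    assert all(not p.requires_grad for p in layer.parameters())
    print(f"[{get_timestamp()}] running on {device}")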
286
0