Dataset schema (column · dtype · observed range):

    code                       string   87 – 55.2k characters
    code_codestyle             int64    0 – 349
    style_context              string   135 – 49.1k characters
    style_context_codestyle    int64    0 – 349
    label                      int64    0 – 1

Each row below lists its cells in this column order; the integer cells are labeled with their column names.
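A minimal sketch of how a dataset with this schema could be loaded and inspected with the `datasets` library; the repository id below is a placeholder, not the dataset's real location:

# Hypothetical loader -- the repo id "user/code-style-pairs" is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell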
import re


def a__(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    >>> a__('ATCG')
    'TAGC'
    """
    # Reject strands containing anything other than the four bases.
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 330
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
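The long cell above (this row's style_context) is a vendored copy of the py-filelock module. A minimal usage sketch of the locking pattern it implements, using the standalone `filelock` package the code was vendored from:

from filelock import FileLock, Timeout  # pip install filelock

lock = FileLock("shared_resource.txt.lock", timeout=5)
try:
    with lock:  # blocks until acquired, or raises Timeout after 5 seconds
        with lock:  # re-entrant: the internal counter nests instead of deadlocking
            pass  # read/write shared_resource.txt safely here
except Timeout:
    print("another process is holding the lock")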
style_context_codestyle: 330
label: 1
def triangle_number_generator():
    # Yield the triangle numbers n * (n + 1) / 2 for n = 1, 2, 3, ...
    for n in range(1, 1_00_00_00):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    # Count divisors via prime factorisation: the divisor count is the
    # product of (multiplicity + 1) over all prime factors.
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    # Project Euler 12: the first triangle number with more than 500 divisors.
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)


if __name__ == "__main__":
    print(solution())
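A quick sanity check of the divisor-counting helper under the reconstructed names above (28 = 2² · 7, so it has (2 + 1) · (1 + 1) = 6 divisors):

assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(1) == 1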
code_codestyle: 330
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
style_context_codestyle: 330
label: 1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
code_codestyle: 330
def a__(number: int) -> bool:
    """Check whether ``number`` is automorphic, i.e. its square ends in the number itself.

    >>> a__(76)
    True
    >>> a__(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, digit by digit.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 330
label: 1
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
code_codestyle: 330
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
style_context_codestyle: 330
label: 1
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: add the cheapest edges that do not close a cycle
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
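A small usage sketch for the graph class above; the MST method name (`kruskal`) and the node/weight values are part of this reconstruction, not guaranteed by the original cell:

# Build a 4-node weighted graph and extract its minimum spanning tree.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(3, 4, 1)
g.add_edge(1, 4, 3)
mst = g.kruskal()
print(mst.connections)  # the MST keeps edges (1, 2), (3, 4) and (2, 3)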
code_codestyle: 330
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
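The cell above follows transformers' lazy-module pattern: the package imports a submodule only when one of its symbols is first accessed. A minimal standalone sketch of the same idea, with simplified, illustrative names (not the real `_LazyModule` implementation):

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Import submodules on first attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, name: str):
        if name in self._symbol_to_module:
            submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
            value = getattr(submodule, name)
            setattr(self, name, value)  # cache so __getattr__ is not hit again
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")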
style_context_codestyle: 330
label: 1
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = AudioLDMPipeline lowerCAmelCase__ = TEXT_TO_AUDIO_PARAMS lowerCAmelCase__ = TEXT_TO_AUDIO_BATCH_PARAMS lowerCAmelCase__ = frozenset( [ """num_inference_steps""", """num_waveforms_per_prompt""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) def lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__UpperCAmelCase , ) __lowerCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __lowerCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCamelCase = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) __lowerCamelCase = ClapTextModelWithProjection(__UpperCAmelCase ) __lowerCamelCase = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) __lowerCamelCase = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__UpperCAmelCase , ) __lowerCamelCase = SpeechTaHifiGan(__UpperCAmelCase ) __lowerCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __lowerCamelCase = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''cpu''' # ensure determinism 
for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 256 __lowerCamelCase = audio[:10] __lowerCamelCase = np.array( [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = 3 * [inputs['''prompt''']] # forward __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ) __lowerCamelCase = output.audios[0] __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = 3 * [inputs.pop('''prompt''' )] __lowerCamelCase = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''pt''' , ) __lowerCamelCase = text_inputs['''input_ids'''].to(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.text_encoder( __UpperCAmelCase , ) __lowerCamelCase = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __lowerCamelCase = F.normalize(__UpperCAmelCase , dim=-1 ) __lowerCamelCase = prompt_embeds # forward __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = 3 * ['''this is a negative prompt'''] __lowerCamelCase = negative_prompt __lowerCamelCase = 3 * [inputs['''prompt''']] # forward __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ) __lowerCamelCase = output.audios[0] __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = 3 * [inputs.pop('''prompt''' )] __lowerCamelCase = [] for p in [prompt, negative_prompt]: __lowerCamelCase = audioldm_pipe.tokenizer( __UpperCAmelCase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''pt''' , ) __lowerCamelCase = text_inputs['''input_ids'''].to(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.text_encoder( __UpperCAmelCase , ) __lowerCamelCase = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __lowerCamelCase = F.normalize(__UpperCAmelCase , dim=-1 ) embeds.append(__UpperCAmelCase ) __lowerCamelCase ,__lowerCamelCase = embeds # forward __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def lowerCamelCase ( 
self ): '''simple docstring''' __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = '''egg cracking''' __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 256 __lowerCamelCase = audio[:10] __lowerCamelCase = np.array( [-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) __lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts __lowerCamelCase = 2 __lowerCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt __lowerCamelCase = 2 __lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts __lowerCamelCase = 2 __lowerCamelCase = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.vocoder.config.sampling_rate __lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.016 __lowerCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **__UpperCAmelCase ) __lowerCamelCase = output.audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.032 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = AudioLDMPipeline(**__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = ['''hey'''] __lowerCamelCase = 
audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) __lowerCamelCase = output.audios.shape assert audio_shape == (1, 256) __lowerCamelCase = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __lowerCamelCase = SpeechTaHifiGan(__UpperCAmelCase ).to(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 ) __lowerCamelCase = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def lowerCamelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__UpperCAmelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase ) @slow class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ): '''simple docstring''' __lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 8, 128, 16) ) __lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) __lowerCamelCase = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_inputs(__UpperCAmelCase ) __lowerCamelCase = 25 __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 81920 __lowerCamelCase = audio[77230:77240] __lowerCamelCase = np.array( [-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] ) __lowerCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __lowerCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) __lowerCamelCase = audioldm_pipe.to(__UpperCAmelCase ) audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCamelCase = self.get_inputs(__UpperCAmelCase ) __lowerCamelCase = audioldm_pipe(**__UpperCAmelCase ).audios[0] assert audio.ndim == 1 assert len(__UpperCAmelCase ) == 81920 __lowerCamelCase = audio[27780:27790] __lowerCamelCase = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] ) __lowerCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
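# A minimal end-to-end sketch of the pipeline exercised by the tests above. It assumes the
# ``cvssp/audioldm`` checkpoint referenced in the slow tests is downloadable and that a CUDA
# device is present; the prompt and the 16 kHz sampling rate come from the test fixtures,
# while the step count, clip length, and output filename are illustrative choices.
import scipy.io.wavfile
import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

audio = pipe(
    "A hammer hitting a wooden surface",  # same prompt as the dummy inputs above
    num_inference_steps=25,
    audio_length_in_s=5.0,
).audios[0]

# The vocoder config fixes the output sampling rate (16000 Hz in these tests).
scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)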
330
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected) # Therefore here we use only bloom-1b7 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `float` cast self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `half` cast self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
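# A compact sketch of the 4-bit loading path these tests cover, assuming ``bitsandbytes``
# and ``accelerate`` are installed and a CUDA GPU is available. The checkpoint name and
# prompt mirror the constants defined in the base test class above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigscience/bloom-1b7"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    load_in_4bit=True,   # quantize linear layers to 4-bit on load
    device_map="auto",   # let accelerate place the weights
)

encoded = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**encoded, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))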
330
1
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = None lowerCAmelCase__ = BloomTokenizerFast lowerCAmelCase__ = BloomTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = """tokenizer_file""" lowerCAmelCase__ = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def lowerCamelCase ( self ): '''simple docstring''' super().setUp() __lowerCamelCase = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] __lowerCamelCase = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] __lowerCamelCase = tokenizer.batch_encode_plus(__UpperCAmelCase )['''input_ids'''] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCamelCase = ('''This is a simple input''', '''This is a pair''') __lowerCamelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(__UpperCAmelCase , max_length=__UpperCAmelCase ) tokenizer_r.encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase ) tokenizer_r.batch_encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase ) tokenizer_r.encode(__UpperCAmelCase , max_length=__UpperCAmelCase ) tokenizer_r.batch_encode_plus(__UpperCAmelCase , max_length=__UpperCAmelCase ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) __lowerCamelCase = None # Hotfixing padding = None self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises( __UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , ) # Pair input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase 
, padding='''max_length''' ) # Pair input self.assertRaises( __UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=__UpperCAmelCase ) __lowerCamelCase = next(iter(__UpperCAmelCase ) )['''premise'''] # pick one sample __lowerCamelCase = list(sample_data.values() ) __lowerCamelCase = list(map(tokenizer.encode , __UpperCAmelCase ) ) __lowerCamelCase = [tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) for x in output_tokens] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose # any sequence length constraints. The parent class's test would fail since it relies on the # maximum sequence length of the positional embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
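# A short round-trip sketch of the behaviour verified above, assuming the
# ``bigscience/tokenizer`` repo is reachable. The inputs are the same ones the
# encode/decode test asserts on.
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")

batch = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
input_ids = tokenizer.batch_encode_plus(batch)["input_ids"]
decoded = tokenizer.batch_decode(input_ids)
assert decoded == batch  # decoding recovers the original strings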
330
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
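# A minimal encode/decode round trip for the autoencoder defined above, using a freshly
# initialised (untrained) instance so no checkpoint is needed; the 256x256 input size is
# arbitrary. Tiling is enabled to exercise the tiled_encode / tiled_decode paths.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL()   # default config: 3-channel images, 4 latent channels
vae.enable_tiling()     # inputs larger than the tile size go through the tiled paths

image = torch.randn(1, 3, 256, 256)
posterior = vae.encode(image).latent_dist
latents = posterior.sample()
reconstruction = vae.decode(latents).sample
assert reconstruction.shape == image.shape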
330
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = embeddings_size __lowerCamelCase = hidden_sizes __lowerCamelCase = depths __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_act __lowerCamelCase = num_labels __lowerCamelCase = scope __lowerCamelCase = len(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = self.get_config() return config, pixel_values def lowerCamelCase ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = FlaxRegNetModel(config=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = FlaxRegNetForImageClassification(config=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxRegNetModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self ): '''simple docstring''' return def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = model_class(__UpperCAmelCase ) @jax.jit def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ): return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): __lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( ): __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): 
'''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = (1, 1000) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
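# A condensed version of the slow integration test above, assuming the
# ``facebook/regnet-y-040`` checkpoint and the COCO fixture image are available locally.
from PIL import Image
from transformers import AutoImageProcessor
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification

model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="np")
logits = model(**inputs).logits          # shape (1, 1000): ImageNet classes
predicted_class = int(logits.argmax(axis=-1)[0])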
330
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
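# Example invocation of the conversion script above. The script filename and checkpoint
# path below are placeholders; substitute the actual locations on your machine.
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir ./bigbird-pegasus-converted
#
# The converted weights can then be reloaded with the standard API:
#
#   from transformers import BigBirdPegasusForConditionalGeneration
#   model = BigBirdPegasusForConditionalGeneration.from_pretrained("./bigbird-pegasus-converted")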
330
1
from __future__ import annotations a_ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = graph # mapping node to its parent in resulting breadth first tree __lowerCamelCase = {} __lowerCamelCase = source_vertex def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {self.source_vertex} __lowerCamelCase = None __lowerCamelCase = [self.source_vertex] # first-in, first-out queue while queue: __lowerCamelCase = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__UpperCAmelCase ) __lowerCamelCase = vertex queue.append(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex __lowerCamelCase = self.parent.get(__UpperCAmelCase ) if target_vertex_parent is None: __lowerCamelCase = ( F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}""" ) raise ValueError(__UpperCAmelCase ) return self.shortest_path(__UpperCAmelCase ) + F"""->{target_vertex}""" if __name__ == "__main__": a_ = Graph(graph, """G""") g.breadth_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
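# A small self-check sketch for the class above, following the call-site names used in
# the __main__ block: a breadth-first search rooted at "A" reaches "F" through "C", so
# the reconstructed path has exactly two edges.
bfs_from_a = Graph(graph, "A")
bfs_from_a.breadth_first_search()
assert bfs_from_a.shortest_path("F") == "A->C->F"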
330
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
330
1
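# Hedged usage sketch for the Conversation / conversational-pipeline pair above,
# assuming a transformers release that still ships the "conversational" task
# (it was removed in later versions) and network access for the first download.
# The checkpoint name is illustrative, not taken from the snippet.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What is the capital of France?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])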
import math import sys def a__ ( _UpperCamelCase : str ): __lowerCamelCase = '''''' try: with open(_UpperCamelCase ,'''rb''' ) as binary_file: __lowerCamelCase = binary_file.read() for dat in data: __lowerCamelCase = F"""{dat:08b}""" result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def a__ ( _UpperCamelCase : str ): __lowerCamelCase = {'''0''': '''0''', '''1''': '''1'''} __lowerCamelCase ,__lowerCamelCase = '''''', '''''' __lowerCamelCase = len(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowerCamelCase = lexicon[curr_string] result += last_match_id __lowerCamelCase = last_match_id + '''0''' if math.loga(_UpperCamelCase ).is_integer(): __lowerCamelCase = {} for curr_key in list(_UpperCamelCase ): __lowerCamelCase = lexicon.pop(_UpperCamelCase ) __lowerCamelCase = new_lex __lowerCamelCase = last_match_id + '''1''' index += 1 __lowerCamelCase = '''''' return result def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): __lowerCamelCase = 8 try: with open(_UpperCamelCase ,'''wb''' ) as opened_file: __lowerCamelCase = [ to_write[i : i + byte_length] for i in range(0 ,len(_UpperCamelCase ) ,_UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_UpperCamelCase ,2 ).to_bytes(1 ,byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def a__ ( _UpperCamelCase : str ): __lowerCamelCase = 0 for letter in data_bits: if letter == "1": break counter += 1 __lowerCamelCase = data_bits[counter:] __lowerCamelCase = data_bits[counter + 1 :] return data_bits def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): __lowerCamelCase = read_file_binary(_UpperCamelCase ) __lowerCamelCase = remove_prefix(_UpperCamelCase ) __lowerCamelCase = decompress_data(_UpperCamelCase ) write_file_binary(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
330
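# Small sketch of the byte <-> bit-string round trip that read_file_binary and
# write_file_binary above depend on; the sample bytes are arbitrary.
data = bytes([0b10100001, 0b00000111])
bits = "".join(f"{byte:08b}" for byte in data)  # each byte becomes 8 characters
assert bits == "1010000100000111"
restored = bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))
assert restored == data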
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
330
1
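# Illustration of the pattern-based state-dict key renaming used in the Pegasus
# converter above; these (old, new) pairs are a hypothetical subset of PATTERNS.
DEMO_PATTERNS = [("memory_attention", "encoder_attn"), ("/", ".")]

def demo_rename(key: str) -> str:
    # Apply every substitution in order, exactly as the converter does.
    for old, new in DEMO_PATTERNS:
        key = key.replace(old, new)
    return key

assert demo_rename("decoder/memory_attention/kernel") == "decoder.encoder_attn.kernel"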
import sys import webbrowser import requests from bs4 import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") a_ = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) a_ = requests.get(url, headers={"""UserAgent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # saved locally so the CSS class of result links can be inspected for data in res.iter_content(10_000): out_file.write(data) a_ = BeautifulSoup(res.text, """html.parser""") a_ = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(f"https://google.com{link.get('href')}")
330
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
1
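# Sketch of the "*" layer-index substitution performed by the mapping logic in
# recursively_load_weights above; the split here is a simplified restatement,
# and both key strings are illustrative.
mapped_key = "unispeech_sat.encoder.layers.*.attention.k_proj"
fairseq_name = "encoder.layers.3.self_attn.k_proj"
layer_index = fairseq_name.split("layers.")[-1].split(".")[0]
assert mapped_key.replace("*", layer_index) == "unispeech_sat.encoder.layers.3.attention.k_proj"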
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) a_ = logging.getLogger() def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase = {} __lowerCamelCase = os.path.join(_UpperCamelCase ,'''all_results.json''' ) if os.path.exists(_UpperCamelCase ): with open(_UpperCamelCase ,'''r''' ) as f: __lowerCamelCase = json.load(_UpperCamelCase ) else: raise ValueError(F"""can't find {path}""" ) return results a_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' import xla_spawn __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = F""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__UpperCAmelCase , '''argv''' , __UpperCAmelCase ): __lowerCamelCase = time() xla_spawn.main() __lowerCamelCase = time() __lowerCamelCase = get_results(__UpperCAmelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def lowerCamelCase ( self ): '''simple docstring''' import xla_spawn __lowerCamelCase = ''' ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py '''.split() with patch.object(__UpperCAmelCase , '''argv''' , __UpperCAmelCase ): xla_spawn.main()
330
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
330
1
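# Hedged usage sketch for the depth-estimation pipeline above; "Intel/dpt-large"
# is one commonly used checkpoint for this task (downloaded on first call) and
# is an assumption here, not taken from the snippet.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")  # PIL image; outputs["predicted_depth"] is the raw tensor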
a_ = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.35_58_18, } def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : float ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: __lowerCamelCase = ( F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {", ".join(ENERGY_CONVERSION )}""" ) raise ValueError(_UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
330
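# Standalone restatement of the conversion rule used above, with plain names and
# a two-entry table chosen for illustration:
# result = value * factor[from_type] / factor[to_type].
factors = {"joule": 1.0, "kilowatthour": 3_600_000}

def convert(from_type: str, to_type: str, value: float) -> float:
    return value * factors[from_type] / factors[to_type]

assert convert("kilowatthour", "joule", 1) == 3_600_000.0
assert convert("joule", "kilowatthour", 1_800_000) == 0.5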
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
1
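# Hedged round-trip sketch: the class above mirrors CLIPImageProcessor, so the
# stock processor illustrates the expected output shape; the checkpoint name is
# an assumption, not taken from the snippet.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
assert batch["pixel_values"].shape == (1, 3, 224, 224)  # resize -> center crop -> normalize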
def a__ ( _UpperCamelCase : int = 4_00_00_00 ): __lowerCamelCase = [] __lowerCamelCase ,__lowerCamelCase = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(f"{solution() = }")
330
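# Cross-check for the even-Fibonacci sum above: even Fibonacci numbers satisfy
# E(k) = 4 * E(k-1) + E(k-2), so they can be generated without the odd terms.
def sum_even_fibs(limit: int) -> int:
    total, current, nxt = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while current <= limit:
        total += current
        current, nxt = nxt, 4 * nxt + current
    return total

assert sum_even_fibs(4_000_000) == 4_613_732  # the well-known Project Euler #2 answer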
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if it's not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda x : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
330
1
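# Standalone sketch of the same Kruskal idea as the classes above, with plain
# names; the union-find here uses path halving instead of the recursive find,
# and omits union by rank for brevity.
def kruskal(nodes, edges):
    parent = {node: node for node in nodes}

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding this edge does not create a cycle
            parent[root_u] = root_v
            mst.append((u, v, weight))
    return mst

assert kruskal({"a", "b", "c"}, [("a", "b", 1), ("b", "c", 2), ("a", "c", 3)]) == [
    ("a", "b", 1),
    ("b", "c", 2),
]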
from ....configuration_utils import PretrainedConfig from ....utils import logging a_ = logging.get_logger(__name__) a_ = { """speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """mctct""" def __init__( self , __UpperCAmelCase=8065 , __UpperCAmelCase=1536 , __UpperCAmelCase=36 , __UpperCAmelCase=6144 , __UpperCAmelCase=4 , __UpperCAmelCase=384 , __UpperCAmelCase=920 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.3 , __UpperCAmelCase="relu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.3 , __UpperCAmelCase=0.3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0.3 , __UpperCAmelCase=1 , __UpperCAmelCase=(7,) , __UpperCAmelCase=(3,) , __UpperCAmelCase=80 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="sum" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = intermediate_size __lowerCamelCase = num_attention_heads __lowerCamelCase = attention_head_dim __lowerCamelCase = max_position_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = layerdrop __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = pad_token_id __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id __lowerCamelCase = conv_glu_dim __lowerCamelCase = conv_dropout __lowerCamelCase = num_conv_layers __lowerCamelCase = input_feat_per_channel __lowerCamelCase = input_channels __lowerCamelCase = conv_channels __lowerCamelCase = ctc_loss_reduction __lowerCamelCase = ctc_zero_infinity # prevents config testing fail with exporting to json __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ''' F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """ F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
330
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
1
from manim import * class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = Rectangle(height=0.5 , width=0.5 ) __lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = Text('''CPU''' , font_size=24 ) __lowerCamelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) __lowerCamelCase = [mem.copy() for i in range(4 )] __lowerCamelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = Text('''GPU''' , font_size=24 ) __lowerCamelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = Text('''Model''' , font_size=24 ) __lowerCamelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) __lowerCamelCase = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) __lowerCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) cpu_targs.append(__UpperCAmelCase ) __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) __lowerCamelCase = Text('''Loaded Checkpoint''' , font_size=24 ) __lowerCamelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) __lowerCamelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowerCamelCase = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) __lowerCamelCase = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) ) self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) __lowerCamelCase = [] 
__lowerCamelCase = [] for i, rect in enumerate(__UpperCAmelCase ): __lowerCamelCase = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) ) __lowerCamelCase = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(*__UpperCAmelCase ) self.wait()
330
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def a__ ( _UpperCamelCase : Optional[Any] ): if is_torch_version('''<''' ,'''2.0.0''' ) or not hasattr(_UpperCamelCase ,'''_dynamo''' ): return False return isinstance(_UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule ) def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : bool = True ): __lowerCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __lowerCamelCase = is_compiled_module(_UpperCamelCase ) if is_compiled: __lowerCamelCase = model __lowerCamelCase = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_UpperCamelCase ,_UpperCamelCase ): __lowerCamelCase = model.module if not keep_fpaa_wrapper: __lowerCamelCase = getattr(_UpperCamelCase ,'''forward''' ) __lowerCamelCase = model.__dict__.pop('''_original_forward''' ,_UpperCamelCase ) if original_forward is not None: while hasattr(_UpperCamelCase ,'''__wrapped__''' ): __lowerCamelCase = forward.__wrapped__ if forward == original_forward: break __lowerCamelCase = forward if getattr(_UpperCamelCase ,'''_converted_to_transformer_engine''' ,_UpperCamelCase ): convert_model(_UpperCamelCase ,to_transformer_engine=_UpperCamelCase ) if is_compiled: __lowerCamelCase = model __lowerCamelCase = compiled_model return model def a__ ( ): PartialState().wait_for_everyone() def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_UpperCamelCase ,_UpperCamelCase ) elif PartialState().local_process_index == 0: torch.save(_UpperCamelCase ,_UpperCamelCase ) @contextmanager def a__ ( **_UpperCamelCase : Optional[int] ): for key, value in kwargs.items(): __lowerCamelCase = str(_UpperCamelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def a__ ( _UpperCamelCase : int ): if not hasattr(_UpperCamelCase ,'''__qualname__''' ) and not hasattr(_UpperCamelCase ,'''__name__''' ): __lowerCamelCase = getattr(_UpperCamelCase ,'''__class__''' ,_UpperCamelCase ) if hasattr(_UpperCamelCase ,'''__qualname__''' ): return obj.__qualname__ if hasattr(_UpperCamelCase ,'''__name__''' ): return obj.__name__ return str(_UpperCamelCase ) def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Any ): for key, value in source.items(): if isinstance(_UpperCamelCase ,_UpperCamelCase ): __lowerCamelCase = destination.setdefault(_UpperCamelCase ,{} ) merge_dicts(_UpperCamelCase ,_UpperCamelCase ) else: __lowerCamelCase = value return destination def a__ ( _UpperCamelCase : int = None ): if port is None: __lowerCamelCase = 2_95_00 with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
330
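# A minimal, hedged sketch of the port-availability check implemented at the
# end of the preceding record. `is_port_in_use` is a descriptive stand-in for
# the record's mangled `a__`; the logic mirrors its connect_ex call on
# localhost (29500 is the default torch distributed rendezvous port).
import socket


def is_port_in_use(port: int = 29500) -> bool:
    # connect_ex returns 0 when something is already listening on the port
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0


if __name__ == "__main__":
    print(is_port_in_use(29500))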
from string import ascii_lowercase, ascii_uppercase


def a__ ( _UpperCamelCase : str ):
    if not sentence:
        return ""
    __lowerCamelCase = dict(zip(_UpperCamelCase ,_UpperCamelCase ) )

    return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
330
1
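# A hedged, de-mangled sketch of the first-letter capitalisation trick in the
# record above: build a lower->upper mapping from the two ascii tables, then
# look up only the first character. Names here are descriptive stand-ins for
# the record's mangled identifiers.
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


assert capitalize("hello world") == "Hello world"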
import os


def a__ ( _UpperCamelCase : str ):
    __lowerCamelCase = len(grid[0] )
    __lowerCamelCase = len(_UpperCamelCase )
    __lowerCamelCase = 0
    __lowerCamelCase = 0
    __lowerCamelCase = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(_UpperCamelCase ):
        for j in range(n_rows - 3 ):
            __lowerCamelCase = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            __lowerCamelCase = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                __lowerCamelCase = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                __lowerCamelCase = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            __lowerCamelCase = max(
                _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
            if max_product > largest:
                __lowerCamelCase = max_product

    return largest


def a__ ( ):
    __lowerCamelCase = []
    with open(os.path.dirname(_UpperCamelCase ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )

    __lowerCamelCase = [[int(_UpperCamelCase ) for i in grid[j]] for j in range(len(_UpperCamelCase ) )]

    return largest_product(_UpperCamelCase )


if __name__ == "__main__":
    print(solution())
330
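# A hedged miniature of the brute-force scan in the record above: take the
# maximum product of four adjacent values along rows, columns and both
# diagonals of a square grid. Kept small on purpose; the index bounds follow
# the same pattern as the record's loops.
def max_product_of_four(grid: list) -> int:
    n = len(grid)
    best = 0
    for r in range(n):
        for c in range(n - 3):
            best = max(best, grid[r][c] * grid[r][c + 1] * grid[r][c + 2] * grid[r][c + 3])  # row
            best = max(best, grid[c][r] * grid[c + 1][r] * grid[c + 2][r] * grid[c + 3][r])  # column
    for r in range(n - 3):
        for c in range(n - 3):
            best = max(best, grid[r][c] * grid[r + 1][c + 1] * grid[r + 2][c + 2] * grid[r + 3][c + 3])  # \ diagonal
            best = max(best, grid[r + 3][c] * grid[r + 2][c + 1] * grid[r + 1][c + 2] * grid[r][c + 3])  # / diagonal
    return best


assert max_product_of_four([[1] * 4 for _ in range(4)]) == 1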
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
1
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def a__ ( _UpperCamelCase : bool = True ,*_UpperCamelCase : Optional[Any] ,**_UpperCamelCase : Tuple ):
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    __lowerCamelCase = False
    if main_process_only:
        __lowerCamelCase = PartialState().local_process_index == 0
    return _tqdm(*_UpperCamelCase ,**_UpperCamelCase ,disable=_UpperCamelCase )
330
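# Hedged usage sketch for the wrapper above: in a multi-process accelerate run
# only local process 0 renders the bar. The first positional argument is the
# record's `main_process_only` flag.
#
#   from accelerate.utils import tqdm
#   for _ in tqdm(True, range(100)):
#       ...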
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ["""TimmBackbone"""]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
from cva import destroyAllWindows, imread, imshow, waitKey


def a__ ( _UpperCamelCase : Dict ):
    # getting number of pixels in the image
    __lowerCamelCase ,__lowerCamelCase = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(_UpperCamelCase ):
        for j in range(_UpperCamelCase ):
            __lowerCamelCase = [2_55, 2_55, 2_55] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    a_ = imread("""image_data/lena.jpg""", 1)

    # convert to its negative
    a_ = convert_to_negative(img)

    # show result image
    imshow("""negative of original image""", img)

    waitKey(0)
    destroyAllWindows()
330
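# A hedged alternative to the per-pixel loop in the record above: OpenCV
# images are uint8 numpy arrays, so the negative is a single vectorized
# subtraction. Function name is a descriptive stand-in.
import numpy as np


def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    return 255 - img  # broadcasts over height x width x channels


assert (convert_to_negative_vectorized(np.zeros((2, 2, 3), dtype=np.uint8)) == 255).all()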
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = torch.device("""cpu""") def a__ ( ): __lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ) return im def a__ ( _UpperCamelCase : Dict ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : str ,_UpperCamelCase : Any ): __lowerCamelCase = dct.pop(_UpperCamelCase ) __lowerCamelCase = val def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase = [] for k in state_dict.keys(): __lowerCamelCase = k if ".pwconv" in k: __lowerCamelCase = k_new.replace('''.pwconv''' ,'''.point_wise_conv''' ) if ".dwconv" in k: __lowerCamelCase = k_new.replace('''.dwconv''' ,'''.depth_wise_conv''' ) if ".Proj." in k: __lowerCamelCase = k_new.replace('''.Proj.''' ,'''.proj.''' ) if "patch_embed" in k_new: __lowerCamelCase = k_new.replace('''patch_embed''' ,'''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: __lowerCamelCase = k_new.split('''.''' ) if ls[2].isdigit(): __lowerCamelCase = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: __lowerCamelCase = k_new.replace('''network''' ,'''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Optional[int] ): __lowerCamelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __lowerCamelCase = 10_00 __lowerCamelCase = '''huggingface/label-files''' __lowerCamelCase = '''imagenet-1k-id2label.json''' __lowerCamelCase = json.load(open(hf_hub_download(_UpperCamelCase ,_UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) ) __lowerCamelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __lowerCamelCase = [3, 3, 6, 4] __lowerCamelCase = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": __lowerCamelCase = [3, 3, 9, 6] __lowerCamelCase = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": __lowerCamelCase = [4, 3, 10, 5] __lowerCamelCase = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": __lowerCamelCase = [4, 4, 12, 6] __lowerCamelCase = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): __lowerCamelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location='''cpu''' ,check_hash=_UpperCamelCase ) else: 
__lowerCamelCase = torch.load(_UpperCamelCase ,map_location='''cpu''' ) __lowerCamelCase = checkpoint __lowerCamelCase = create_rename_keys(_UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # load HuggingFace model __lowerCamelCase = SwiftFormerForImageClassification(_UpperCamelCase ).eval() hf_model.load_state_dict(_UpperCamelCase ) # prepare test inputs __lowerCamelCase = prepare_img() __lowerCamelCase = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) __lowerCamelCase = processor(images=_UpperCamelCase ,return_tensors='''pt''' ) # compare outputs from both models __lowerCamelCase = get_expected_output(_UpperCamelCase ) __lowerCamelCase = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] ,_UpperCamelCase ,atol=1e-3 ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a_ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
330
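# Hypothetical invocation sketch for the SwiftFormer conversion script above
# (the script filename and checkpoint path are placeholders, not taken from
# the record):
#
#   python convert_swiftformer_checkpoint.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth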
def a__ ( _UpperCamelCase : int ):
    # Round the real cube root before re-cubing: n ** (1 / 3) is inexact in
    # floating point (e.g. 27 ** (1 / 3) == 3.0000000000000004), so a direct
    # equality test on the raw root wrongly reports False for perfect cubes.
    __lowerCamelCase = round(n ** (1 / 3) )
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
330
1
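# Why the round() in the record above matters (hedged demonstration): the real
# cube root of a perfect cube is not always exactly representable in floats.
print(27 ** (1 / 3))  # 3.0000000000000004 on typical platforms
assert round(343 ** (1 / 3)) ** 3 == 343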
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Optional[int] ):
    # Initialise PyTorch model
    __lowerCamelCase = BertConfig.from_json_file(_UpperCamelCase )
    print(F"""Building PyTorch model from configuration: {config}""" )
    __lowerCamelCase = BertForPreTraining(_UpperCamelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() ,_UpperCamelCase )


if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
330
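# Hypothetical invocation sketch for the TF -> PyTorch conversion script above
# (file names are placeholders):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin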
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
1
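# A minimal, hedged stand-in for the decorator the training script above
# relies on: retry the wrapped training loop with a halved batch size whenever
# it raises an out-of-memory error. This is a simplification, not accelerate's
# actual implementation (which also inspects CUDA-specific error messages).
def find_executable_batch_size_sketch(starting_batch_size: int = 16):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    # the wrapped loop receives the current batch size first,
                    # matching the inner_training_loop signature in the record
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" in str(err).lower():
                        batch_size //= 2  # halve and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator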
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""image_processor""", """tokenizer"""] lowerCAmelCase__ = """Pix2StructImageProcessor""" lowerCAmelCase__ = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = False super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 2048 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: __lowerCamelCase = self.tokenizer __lowerCamelCase = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values __lowerCamelCase = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , max_patches=__UpperCAmelCase , **__UpperCAmelCase ) else: # add pixel_values and bbox __lowerCamelCase = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and not self.image_processor.is_vqa: __lowerCamelCase = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) if "attention_mask" in text_encoding: __lowerCamelCase = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: __lowerCamelCase = text_encoding.pop('''input_ids''' ) else: __lowerCamelCase = None if text_encoding is not None: encoding_image_processor.update(__UpperCAmelCase ) return encoding_image_processor def lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase 
): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer.model_input_names __lowerCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
330
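# Hedged usage sketch for the processor class above; the checkpoint id is
# illustrative. Text-only calls go through the tokenizer branch and
# image(+text) calls through the image-processor branch, mirroring the
# __call__ dispatch in the record.
#
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")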
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
1
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ = logging.get_logger(__name__)

a_ = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}


class __lowerCAmelCase ( lowerCAmelCase__ ):
    lowerCAmelCase__ = """efficientformer"""

    def __init__(
        self ,
        __UpperCAmelCase = [3, 2, 6, 4] ,
        __UpperCAmelCase = [48, 96, 224, 448] ,
        __UpperCAmelCase = [True, True, True, True] ,
        __UpperCAmelCase = 448 ,
        __UpperCAmelCase = 32 ,
        __UpperCAmelCase = 4 ,
        __UpperCAmelCase = 7 ,
        __UpperCAmelCase = 5 ,
        __UpperCAmelCase = 8 ,
        __UpperCAmelCase = 4 ,
        __UpperCAmelCase = 0.0 ,
        __UpperCAmelCase = 16 ,
        __UpperCAmelCase = 3 ,
        __UpperCAmelCase = 3 ,
        __UpperCAmelCase = 3 ,
        __UpperCAmelCase = 2 ,
        __UpperCAmelCase = 1 ,
        __UpperCAmelCase = 0.0 ,
        __UpperCAmelCase = 1 ,
        __UpperCAmelCase = True ,
        __UpperCAmelCase = True ,
        __UpperCAmelCase = 1E-5 ,
        __UpperCAmelCase = "gelu" ,
        __UpperCAmelCase = 0.02 ,
        __UpperCAmelCase = 1E-1_2 ,
        __UpperCAmelCase = 224 ,
        __UpperCAmelCase = 1E-0_5 ,
        **__UpperCAmelCase ,
    ):
        '''simple docstring'''
        super().__init__(**__UpperCAmelCase )
        __lowerCamelCase = hidden_act
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = hidden_sizes
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = initializer_range
        __lowerCamelCase = layer_norm_eps
        __lowerCamelCase = patch_size
        __lowerCamelCase = num_channels
        __lowerCamelCase = depths
        __lowerCamelCase = mlp_expansion_ratio
        __lowerCamelCase = downsamples
        __lowerCamelCase = dim
        __lowerCamelCase = key_dim
        __lowerCamelCase = attention_ratio
        __lowerCamelCase = resolution
        __lowerCamelCase = pool_size
        __lowerCamelCase = downsample_patch_size
        __lowerCamelCase = downsample_stride
        __lowerCamelCase = downsample_pad
        __lowerCamelCase = drop_path_rate
        __lowerCamelCase = num_metaad_blocks
        __lowerCamelCase = distillation
        __lowerCamelCase = use_layer_scale
        __lowerCamelCase = layer_scale_init_value
        __lowerCamelCase = image_size
        __lowerCamelCase = batch_norm_eps
330
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
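The tester above derives its sequence length from the patch grid; the same arithmetic standalone, using the tester's default sizes:

image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2   # 5 x 5 = 25 patches per frame
seq_length = num_frames * num_patches_per_frame + 1       # +1 for the CLS token
assert seq_length == 51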
330
1
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
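A standalone sketch of the pattern rewriting used by the converter above, re-implemented here with a hypothetical three-pattern subset of the table at the top of the row:

def rename_key(k: str, patterns) -> str:
    # Apply each (old, new) substitution in order, as the converter does.
    for old, new in patterns:
        k = k.replace(old, new)
    return k

patterns = [("memory_attention", "encoder_attn"), ("/", "."), ("kernel", "weight")]
assert rename_key("memory_attention/kernel", patterns) == "encoder_attn.weight"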
330
def a__ ( number: int ):
    # Return True iff `number` is automorphic, i.e. its square ends in `number`.
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
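Spot checks for the predicate above, which returns True exactly for automorphic numbers (n whose square ends in n):

for n, expected in [(0, True), (1, True), (5, True), (6, True), (25, True), (7, False)]:
    assert a__(n) == expected  # 25 * 25 = 625 ends in 25; 7 * 7 = 49 does not end in 7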
330
1
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def a__ ( _UpperCamelCase : List[Any] ): __lowerCamelCase = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_UpperCamelCase ,_UpperCamelCase ) def a__ ( _UpperCamelCase : Any ): __lowerCamelCase ,__lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(_UpperCamelCase ,_UpperCamelCase ,bias=_UpperCamelCase ) __lowerCamelCase = emb.weight.data return lin_layer def a__ ( _UpperCamelCase : Optional[int] ): __lowerCamelCase = torch.load(_UpperCamelCase ,map_location='''cpu''' ) __lowerCamelCase = Namespace(**checkpoint['''cfg''']['''model'''] ) __lowerCamelCase = checkpoint['''model'''] remove_ignore_keys_(_UpperCamelCase ) __lowerCamelCase = state_dict['''decoder.embed_tokens.weight'''].shape[0] __lowerCamelCase = {key.replace('''decoder''' ,'''model''' ): val for key, val in state_dict.items()} __lowerCamelCase = XGLMConfig( vocab_size=_UpperCamelCase ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function='''gelu''' ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,) __lowerCamelCase = XGLMForCausalLM(_UpperCamelCase ) __lowerCamelCase = model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) print(_UpperCamelCase ) __lowerCamelCase = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
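A minimal sketch of the embedding-to-linear tying performed by the helper above: the linear layer reuses the embedding matrix, so each logit is a dot product with an embedding row.

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data          # share the matrix, as the converter does
x = emb(torch.tensor([3]))                 # embed token id 3
# the logit for token 3 is then the squared norm of its embedding row
assert torch.allclose(lin(x)[0, 3], emb.weight[3].pow(2).sum())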
330
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
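The comparisons above all follow one pattern; a minimal standalone version, assuming jax is installed:

import jax.numpy as jnp

sample = jnp.ones((1, 4, 8, 8), dtype=jnp.bfloat16)
# flatten a corner slice and upcast before comparing at a bf16-friendly tolerance
corner = jnp.asarray(sample[-1, -2:, -2:, :2].flatten(), dtype=jnp.float32)
assert jnp.allclose(corner, jnp.ones_like(corner), atol=1e-2)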
330
1
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(__UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits __lowerCamelCase = optax.softmax_cross_entropy(__UpperCAmelCase , onehot(__UpperCAmelCase , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
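The score arithmetic above in isolation: mean per-token cross-entropy, rescaled by sequence length into a summed log-likelihood. A small sketch assuming optax and flax are installed:

import math

import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.zeros((1, 3, 5))                 # uniform scores over 5 classes
labels = jnp.array([[0, 1, 2]])
loss = optax.softmax_cross_entropy(logits, onehot(labels, 5)).mean()
score = -(labels.shape[-1] * loss.item())     # equals -3 * ln(5) here
assert abs(score + 3 * math.log(5)) < 1e-4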
330
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # Elementwise max(0, x); np.maximum broadcasts, so array inputs work too.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
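Because np.maximum broadcasts, the same relu applies unchanged to 2-D batches:

print(np.maximum(0, np.array([[-2.0, 3.0], [0.5, -1.0]])))
# [[0.  3. ]
#  [0.5 0. ]]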
330
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
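A condensed sketch of the adapter math behind the LoRALayer above: the frozen base output plus a low-rank residual path, with the up-projection zero-initialized so training starts as a no-op.

import torch
from torch import nn

base = nn.Linear(8, 8)
down = nn.Linear(8, 2, bias=False)   # rank-2 down-projection
up = nn.Linear(2, 8, bias=False)
nn.init.zeros_(up.weight)            # zero init: the adapter contributes nothing at first
x = torch.randn(1, 8)
assert torch.allclose(base(x) + up(down(x)), base(x))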
330
1
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 0 lowerCAmelCase__ = False lowerCAmelCase__ = 3.0 class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): '''simple docstring''' # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} ) @require_cuda def lowerCamelCase ( self ): '''simple docstring''' # If no defaults are changed, `to_kwargs` returns an empty dict. __lowerCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() __lowerCamelCase = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __lowerCamelCase = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": a_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) a_ = Accelerator(kwargs_handlers=[ddp_scaler]) a_ = torch.nn.Linear(100, 200) a_ = accelerator.prepare(model) # Check the values changed in kwargs a_ = """""" a_ = model.bucket_bytes_cap // (1_024 * 1_024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
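The to_kwargs contract exercised above, restated standalone on the assumption that accelerate's KwargsHandler behaves as tested: only fields that differ from the dataclass defaults are emitted.

from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class Mock(KwargsHandler):
    a: int = 0
    b: bool = False


assert Mock().to_kwargs() == {}            # nothing changed -> empty dict
assert Mock(a=2).to_kwargs() == {"a": 2}   # only the overridden field appears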
330
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
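The horizontal tile blend above in isolation: a linear crossfade over the overlap columns between a left tile a and a right tile b.

import torch

a = torch.zeros(1, 1, 2, 4)   # left tile
b = torch.ones(1, 1, 2, 4)    # right tile
blend_extent = 4
for x in range(blend_extent):
    b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
assert b[0, 0, 0].tolist() == [0.0, 0.25, 0.5, 0.75]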
330
1
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
330
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
330
1
def least_divisible_repunit(divisor: int) -> int:
    # Return the number of digits of the smallest repunit (1, 11, 111, ...)
    # divisible by `divisor`, or 0 if none exists (a divisor sharing a factor
    # with 10 can never divide a repunit).
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    # Find the least odd divisor coprime to 10 whose smallest divisible
    # repunit has more than `limit` digits.
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
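# A small sanity check for the helpers above (added for illustration, not part
# of the original solution); the expected values can be verified by hand.
if __name__ == "__main__":
    assert least_divisible_repunit(7) == 6  # 111111 = 7 * 15873
    assert least_divisible_repunit(41) == 5  # 11111 = 41 * 271
    assert least_divisible_repunit(10) == 0  # 10 shares a factor with 2 and 5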
330
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
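# A hedged usage sketch (added for illustration): assumes a transformers
# release that still ships the conversational pipeline, and picks
# "microsoft/DialoGPT-medium" as an example checkpoint (the model choice is an
# assumption, not mandated by the code above).
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    converse = pipeline("conversational", model="microsoft/DialoGPT-medium")
    chat = Conversation("Going to the movies tonight - any suggestions?")
    chat = converse(chat)
    print(chat.generated_responses[-1])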
330
1
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py a_ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. a_ = direct_transformers_import(PATH_TO_TRANSFORMERS) a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING a_ = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": 
True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def a__ ( _UpperCamelCase : str ,_UpperCamelCase : int ,_UpperCamelCase : List[Any] ,_UpperCamelCase : Dict ): __lowerCamelCase = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): __lowerCamelCase = True # Deal with multi-line cases elif ( re.search( RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" ,_UpperCamelCase ,) is not None ): __lowerCamelCase = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: __lowerCamelCase = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files __lowerCamelCase = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] __lowerCamelCase = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed __lowerCamelCase = True if not attribute_used: __lowerCamelCase = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: __lowerCamelCase = True elif attribute in ["tie_word_embeddings"] and default_value is False: __lowerCamelCase = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: __lowerCamelCase = True elif attribute.endswith('''_token_id''' ): __lowerCamelCase = True # configuration class specific cases if not case_allowed: __lowerCamelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] ) __lowerCamelCase = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def a__ ( _UpperCamelCase : str ): __lowerCamelCase = dict(inspect.signature(config_class.__init__ ).parameters ) __lowerCamelCase = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] __lowerCamelCase = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test 
should pass __lowerCamelCase = {} if len(config_class.attribute_map ) > 0: __lowerCamelCase = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files __lowerCamelCase = inspect.getsourcefile(_UpperCamelCase ) __lowerCamelCase = os.path.dirname(_UpperCamelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. __lowerCamelCase = [os.path.join(_UpperCamelCase ,_UpperCamelCase ) for fn in os.listdir(_UpperCamelCase ) if fn.startswith('''modeling_''' )] # Get the source code strings __lowerCamelCase = [] for path in modeling_paths: if os.path.isfile(_UpperCamelCase ): with open(_UpperCamelCase ) as fp: modeling_sources.append(fp.read() ) __lowerCamelCase = [] for config_param, default_value in zip(_UpperCamelCase ,_UpperCamelCase ): # `attributes` here is all the variant names for `config_param` __lowerCamelCase = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ): unused_attributes.append(attributes[0] ) return sorted(_UpperCamelCase ) def a__ ( ): __lowerCamelCase = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) __lowerCamelCase = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) ,lambda _UpperCamelCase : inspect.isclass(_UpperCamelCase ) and issubclass(_UpperCamelCase ,_UpperCamelCase ) and inspect.getmodule(_UpperCamelCase ) == inspect.getmodule(_config_class ) ,) ] for config_class in config_classes_in_module: __lowerCamelCase = check_config_attributes_being_used(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: __lowerCamelCase = unused_attributes if len(_UpperCamelCase ) > 0: __lowerCamelCase = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(_UpperCamelCase ) if __name__ == "__main__": check_config_attributes()
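# Illustrative invocation (hedged: the utility filename is an assumption based
# on the function it defines; run it from the repository root):
#
#   python utils/check_config_attributes.py
#
# The check raises a ValueError listing, per configuration class, the
# attributes that no modeling file ever reads.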
330
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
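# A hedged invocation sketch (added for illustration; the script filename and
# checkpoint path are placeholders). Both arguments are positional, matching
# the argparse section above, and save_dir falls back to pegasus/<dataset>
# when omitted:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc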
330
1
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


a_ = """3"""

print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())

try:
    import torch

    print("""Torch version:""", torch.__version__)
    print("""Cuda available:""", torch.cuda.is_available())
    print("""Cuda version:""", torch.version.cuda)
    print("""CuDNN version:""", torch.backends.cudnn.version())
    print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
    print("""Torch version:""", None)

try:
    import transformers

    print("""transformers version:""", transformers.__version__)
except ImportError:
    print("""transformers version:""", None)
330
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
1
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by `level` (between -255.0 and 255.0)."""

    def brightness(c: int) -> float:
        # Fundamental transform: shift every channel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
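# A quick in-memory check of change_brightness (added for illustration; it
# needs no image file, unlike the demo above): every pixel of a flat gray
# image should shift up by exactly the requested level.
if __name__ == "__main__":
    _gray = Image.new("L", (2, 2), color=100)
    assert list(change_brightness(_gray, 50).getdata()) == [150] * 4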
330
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
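# A hedged usage sketch (added for illustration): the checkpoint id and image
# URL are assumptions; any depth-estimation checkpoint and RGB image work.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["depth"].size)             # interpolated depth map as a PIL image
    print(result["predicted_depth"].shape)  # raw model output as a torch tensor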
330
1
a_ = [ """DownloadConfig""", """DownloadManager""", """DownloadMode""", """StreamingDownloadManager""", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
330
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
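# A hedged usage sketch (added for illustration): the checkpoint id is an
# assumption; any CLIP-style repository with a preprocessor config works.
if __name__ == "__main__":
    import numpy as np
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop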
330
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json""" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """roformer""" def __init__( self , __UpperCAmelCase=50000 , __UpperCAmelCase=None , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1536 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size if embedding_size is None else embedding_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = rotary_value __lowerCamelCase = use_cache class __lowerCAmelCase ( lowerCAmelCase__ ): @property def lowerCamelCase ( self ): '''simple docstring''' if self.task == "multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
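# A hedged usage sketch (added for illustration): assumes the configuration
# class above is transformers' RoFormerConfig, whose embedding_size falls back
# to hidden_size when left unset.
if __name__ == "__main__":
    from transformers import RoFormerConfig

    config = RoFormerConfig(hidden_size=256, num_hidden_layers=4)
    assert config.embedding_size == 256
    assert config.rotary_value is False  # rotary embeddings on value vectors are off by default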
330
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it's not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect every undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
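# A small end-to-end check of the classes above (added for illustration):
# build a weighted graph of four nodes and confirm Kruskal's algorithm keeps
# the three cheapest edges that connect everything.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    g.add_edge(3, 4, 3)
    mst = g.kruskal()
    # each edge is stored in both directions, so halve the summed weights
    total = sum(mst.connections[u][v] for u in mst.connections for v in mst.connections[u]) // 2
    assert total == 6  # keeps (1,2,1), (2,3,2), (3,4,3); drops the weight-10 edge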
330
1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""input_features""", """is_longer"""] def __init__( self , __UpperCAmelCase=64 , __UpperCAmelCase=48000 , __UpperCAmelCase=480 , __UpperCAmelCase=10 , __UpperCAmelCase=1024 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase = 0 , __UpperCAmelCase = 14000 , __UpperCAmelCase = None , __UpperCAmelCase = "fusion" , __UpperCAmelCase = "repeatpad" , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) __lowerCamelCase = top_db __lowerCamelCase = truncation __lowerCamelCase = padding __lowerCamelCase = fft_window_size __lowerCamelCase = (fft_window_size >> 1) + 1 __lowerCamelCase = hop_length __lowerCamelCase = max_length_s __lowerCamelCase = max_length_s * sampling_rate __lowerCamelCase = sampling_rate __lowerCamelCase = frequency_min __lowerCamelCase = frequency_max __lowerCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__UpperCAmelCase , min_frequency=__UpperCAmelCase , max_frequency=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , norm=__UpperCAmelCase , mel_scale='''htk''' , ) __lowerCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__UpperCAmelCase , min_frequency=__UpperCAmelCase , max_frequency=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = spectrogram( __UpperCAmelCase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__UpperCAmelCase , log_mel='''dB''' , ) return log_mel_spectrogram.T def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk __lowerCamelCase = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk __lowerCamelCase = [0] # randomly choose index for each part __lowerCamelCase = np.random.choice(ranges[0] ) __lowerCamelCase = np.random.choice(ranges[1] ) __lowerCamelCase = np.random.choice(ranges[2] ) __lowerCamelCase = mel[idx_front : idx_front + chunk_frames, :] __lowerCamelCase = mel[idx_middle : idx_middle + chunk_frames, :] __lowerCamelCase = mel[idx_back : idx_back + chunk_frames, :] __lowerCamelCase = torch.tensor(mel[None, None, :] ) __lowerCamelCase = torch.nn.functional.interpolate( __UpperCAmelCase , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=__UpperCAmelCase ) 
__lowerCamelCase = mel_shrink[0][0].numpy() __lowerCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": __lowerCamelCase = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __lowerCamelCase = len(__UpperCAmelCase ) - max_length __lowerCamelCase = np.random.randint(0 , overflow + 1 ) __lowerCamelCase = waveform[idx : idx + max_length] __lowerCamelCase = self._np_extract_fbank_features(__UpperCAmelCase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": __lowerCamelCase = self._np_extract_fbank_features(__UpperCAmelCase , self.mel_filters ) __lowerCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __lowerCamelCase = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __lowerCamelCase = np.stack([mel, mel, mel, mel] , axis=0 ) __lowerCamelCase = False else: __lowerCamelCase = self._random_mel_fusion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = True else: raise NotImplementedError(F"""data_truncating {truncation} not implemented""" ) else: __lowerCamelCase = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __lowerCamelCase = int(max_length / len(__UpperCAmelCase ) ) __lowerCamelCase = np.stack(np.tile(__UpperCAmelCase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": __lowerCamelCase = int(max_length / len(__UpperCAmelCase ) ) __lowerCamelCase = np.stack(np.tile(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = np.pad(__UpperCAmelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": __lowerCamelCase = self._np_extract_fbank_features(__UpperCAmelCase , self.mel_filters ) __lowerCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: __lowerCamelCase = self._np_extract_fbank_features(__UpperCAmelCase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = truncation if truncation is not None else self.truncation __lowerCamelCase = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __lowerCamelCase = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __lowerCamelCase = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ): __lowerCamelCase = np.asarray(__UpperCAmelCase , dtype=np.floataa ) elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCamelCase = [np.asarray(__UpperCAmelCase )] # convert to mel spectrogram, truncate and pad if needed. __lowerCamelCase = [ self._get_input_mel(__UpperCAmelCase , max_length if max_length else self.nb_max_samples , __UpperCAmelCase , __UpperCAmelCase ) for waveform in raw_speech ] __lowerCamelCase = [] __lowerCamelCase = [] for mel, longer in padded_inputs: input_mel.append(__UpperCAmelCase ) is_longer.append(__UpperCAmelCase ) if truncation == "fusion" and sum(__UpperCAmelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __lowerCamelCase = np.random.randint(0 , len(__UpperCAmelCase ) ) __lowerCamelCase = True if isinstance(input_mel[0] , __UpperCAmelCase ): __lowerCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool __lowerCamelCase = [[longer] for longer in is_longer] __lowerCamelCase = {'''input_features''': input_mel, '''is_longer''': is_longer} __lowerCamelCase = BatchFeature(__UpperCAmelCase ) if return_tensors is not None: __lowerCamelCase = input_features.convert_to_tensors(__UpperCAmelCase ) return input_features
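# A hedged usage sketch (added for illustration): assumes the class above is
# transformers' ClapFeatureExtractor and feeds it ten seconds of silence at
# the default 48 kHz sampling rate.
if __name__ == "__main__":
    from transformers import ClapFeatureExtractor

    extractor = ClapFeatureExtractor()
    waveform = np.zeros(10 * 48_000, dtype=np.float32)
    features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 4, frames, 64) with the default "fusion" truncation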
330
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
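# Hedged sketch mirroring the slow integration test above; the checkpoint id and
# the expected output shape are taken directly from that test.
import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)
with torch.no_grad():
    hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # torch.Size([1, 256, 768])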
330
1
a_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] a_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] a_ = { 0: """Sunday""", 1: """Monday""", 2: """Tuesday""", 3: """Wednesday""", 4: """Thursday""", 5: """Friday""", 6: """Saturday""", } def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : int ): assert len(str(_UpperCamelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __lowerCamelCase = year // 1_00 __lowerCamelCase = (5 * (century % 4) + 2) % 7 __lowerCamelCase = year % 1_00 __lowerCamelCase = centurian % 12 __lowerCamelCase = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __lowerCamelCase = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0) else DOOMSDAY_LEAP[month - 1] ) __lowerCamelCase = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
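# Cross-check for the Doomsday implementation above (a sketch; `a__` is the
# obfuscated name of the weekday function in this file). Python's datetime
# agrees with the algorithm on a known date. Note centuries divisible by 400
# (e.g. 2000) ARE leap years, hence the `(year % 400) != 0` branch above.
import datetime

# Saturday has index 5 in datetime.date.weekday() (Monday == 0).
assert datetime.date(2020, 10, 24).weekday() == 5  # a__(2020, 10, 24) -> 'Saturday'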
330
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = (DDPMScheduler,) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__UpperCAmelCase ) return config def lowerCamelCase ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.check_over_configs(thresholding=__UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , ) def lowerCamelCase ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = len(__UpperCAmelCase ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter __lowerCamelCase = torch.manual_seed(0 ) for t in reversed(range(__UpperCAmelCase ) ): # 1. predict noise residual __lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) ) __lowerCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = len(__UpperCAmelCase ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter __lowerCamelCase = torch.manual_seed(0 ) for t in reversed(range(__UpperCAmelCase ) ): # 1. predict noise residual __lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 __lowerCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) ) __lowerCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__UpperCAmelCase ) __lowerCamelCase = scheduler.timesteps for i, timestep in enumerate(__UpperCAmelCase ): if i == len(__UpperCAmelCase ) - 1: __lowerCamelCase = -1 else: __lowerCamelCase = timesteps[i + 1] __lowerCamelCase = scheduler.previous_timestep(__UpperCAmelCase ) __lowerCamelCase = prev_t.item() self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = [100, 87, 50, 51, 0] with self.assertRaises(__UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = [100, 87, 50, 1, 0] __lowerCamelCase = len(__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__UpperCAmelCase ) __lowerCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( __UpperCAmelCase , msg='''`timesteps` must start before 
`self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=__UpperCAmelCase )
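# Minimal sketch of the custom-timesteps API exercised by the tests above
# (assumes diffusers' DDPMScheduler; the timestep list comes from the tests and
# must be strictly descending).
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])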
330
from string import ascii_lowercase, ascii_uppercase def a__ ( _UpperCamelCase : str ): if not sentence: return "" __lowerCamelCase = dict(zip(ascii_lowercase ,ascii_uppercase ) ) return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
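# Readable restatement of the helper above, with illustrative names (the
# original exposes it as the obfuscated `a__`); behaviour shown is what the
# lowercase-to-uppercase mapping implies.
from string import ascii_lowercase, ascii_uppercase

def capitalize_first(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]

assert capitalize_first("hello world") == "Hello world"
assert capitalize_first("123 hello") == "123 hello"  # non-letters pass through
assert capitalize_first("") == ""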
330
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ = { """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""VivitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """VivitModel""", """VivitPreTrainedModel""", """VivitForVideoClassification""", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
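# Hedged setup sketch for the encoder-decoder pair the test above fine-tunes.
# The model and tokenizer ids are the ones the test uses; the config fields set
# here are the generation-related ones the Seq2SeqTrainer relies on (an
# assumption about which obfuscated assignments in the test map to which fields).
from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.eos_token_id = tokenizer.sep_token_id
model.config.pad_token_id = tokenizer.pad_token_id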
330
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = """▁""" a_ = {"""vocab_file""": """prophetnet.tokenizer"""} a_ = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } a_ = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } a_ = { """microsoft/xprophetnet-large-wiki100-cased""": 512, } def a__ ( _UpperCamelCase : List[str] ): __lowerCamelCase = collections.OrderedDict() with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ) as reader: __lowerCamelCase = reader.readlines() for index, token in enumerate(_UpperCamelCase ): __lowerCamelCase = token.rstrip('''\n''' ) __lowerCamelCase = index return vocab class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) __lowerCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __lowerCamelCase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): __lowerCamelCase = F"""[unused{i}]""" __lowerCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __lowerCamelCase = 12 __lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__UpperCAmelCase ) def __getstate__( self ): '''simple docstring''' __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCamelCase = {} __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return ([0] * len(__UpperCAmelCase )) + [1] return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase ( self ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCamelCase = self.sp_model.PieceToId(__UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip() return out_string def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != 
os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , '''wb''' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] __lowerCamelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
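# Hedged usage sketch for the tokenizer implemented above; assumes the public
# class name XLMProphetNetTokenizer and the checkpoint id referenced in this
# file (both assumptions, not stated in the obfuscated source).
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tokenizer("Hello world").input_ids
# build_inputs_with_special_tokens appends a single [SEP] to one sequence and
# joins a pair as `A [SEP] B [SEP]`.
print(ids[-1] == tokenizer.sep_token_id)  # True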
330
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ProphetNetTokenizer lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' super().setUp() __lowerCamelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = '''UNwant\u00E9d,running''' __lowerCamelCase = '''unwanted, running''' return input_text, output_text def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer_class(self.vocab_file ) __lowerCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __lowerCamelCase = {} for i, token in enumerate(__UpperCAmelCase ): __lowerCamelCase = i __lowerCamelCase = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __lowerCamelCase = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __lowerCamelCase = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def lowerCamelCase ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def lowerCamelCase ( self ): '''simple docstring''' self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def lowerCamelCase ( self ): '''simple docstring''' self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __lowerCamelCase = tokenizer.encode('''sequence builders''' , 
add_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
330
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
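# --- Illustrative usage (not part of the test file above) -------------------
# A minimal sketch of the sharding behaviour the tests above exercise,
# assuming `BatchSamplerShard` is importable from `accelerate.data_loader`
# with the (batch_sampler, num_processes, process_index) signature used above.
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
print(list(shards[0]))  # [[0, 1], [4, 5]] -- process 0 gets every other batch
print(list(shards[1]))  # [[2, 3], [6, 7]]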
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS a_ = logging.get_logger(__name__) a_ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if config is None: assert isinstance(self.model , __UpperCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) __lowerCamelCase = self.model.config else: __lowerCamelCase = config __lowerCamelCase = data_args __lowerCamelCase = self.config.tgt_vocab_size if isinstance(self.config , __UpperCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: __lowerCamelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __lowerCamelCase = label_smoothed_nll_loss def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if self.optimizer is None: __lowerCamelCase = ['''bias''', '''LayerNorm.weight'''] __lowerCamelCase = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] __lowerCamelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __lowerCamelCase = Adafactor __lowerCamelCase = {'''scale_parameter''': False, '''relative_step''': False} else: __lowerCamelCase = AdamW __lowerCamelCase = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } __lowerCamelCase = self.args.learning_rate if self.sharded_ddp: __lowerCamelCase = OSS( params=__UpperCAmelCase , optim=__UpperCAmelCase , **__UpperCAmelCase , ) else: __lowerCamelCase = optimizer_cls(__UpperCAmelCase , **__UpperCAmelCase ) if self.lr_scheduler is None: __lowerCamelCase = self._get_lr_scheduler(__UpperCAmelCase ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __lowerCamelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __lowerCamelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __lowerCamelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCAmelCase ) return scheduler def lowerCamelCase ( self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __lowerCamelCase = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] __lowerCamelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __lowerCamelCase ,__lowerCamelCase = model(**__UpperCAmelCase , labels=__UpperCAmelCase , use_cache=__UpperCAmelCase )[:2] else: # compute label smoothed loss __lowerCamelCase = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] __lowerCamelCase = torch.nn.functional.log_softmax(__UpperCAmelCase , dim=-1 ) __lowerCamelCase ,__lowerCamelCase = self.loss_fn(__UpperCAmelCase , __UpperCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = inputs.pop('''labels''' ) __lowerCamelCase ,__lowerCamelCase = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return loss def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = self._prepare_inputs(__UpperCAmelCase ) __lowerCamelCase = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __lowerCamelCase = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **__UpperCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __lowerCamelCase = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['''max_length'''] ) __lowerCamelCase = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data __lowerCamelCase ,__lowerCamelCase = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __lowerCamelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __lowerCamelCase = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # If PAD token is not defined at least EOS token has to be defined __lowerCamelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) __lowerCamelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __lowerCamelCase = tensor return padded_tensor
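# --- Illustrative usage (not part of the trainer above) ---------------------
# A standalone, pure-PyTorch sketch of the padding logic in
# `_pad_tensors_to_max_len`; `pad_token_id=0` is an illustrative value, not a
# model constant.
import torch

def pad_to_max_len(tensor, max_length, pad_token_id):
    # start from a full pad tensor, then copy the generated tokens in front
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

generated = torch.tensor([[5, 6, 7], [8, 9, 10]])
print(pad_to_max_len(generated, max_length=5, pad_token_id=0))
# tensor([[ 5,  6,  7,  0,  0],
#         [ 8,  9, 10,  0,  0]])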
def perfect_cube(n: int) -> bool:
    """Return True if the integer `n` is a perfect cube.

    Rounding the float cube root before comparing avoids the floating-point
    equality bug of checking `(n ** (1 / 3)) ** 3 == n` directly, and taking
    `abs` keeps the root real for negative inputs.
    """
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
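# --- Illustrative alternative (hypothetical helper, not in the file above) --
# For very large integers even the rounded float root can be off by one; an
# exact, integer-only check via binary search avoids floats entirely.
def perfect_cube_exact(n: int) -> bool:
    n = abs(n)
    lo, hi = 0, max(1, n)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False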
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version a_ = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") a_ = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization a_ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } a_ = sorted(arg_to_scheduler.keys()) a_ = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) __lowerCamelCase = 0 __lowerCamelCase = Path(self.hparams.output_dir ) __lowerCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __lowerCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: __lowerCamelCase = config __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F"""model config doesn't have a `{p}` attribute""" setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: __lowerCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: __lowerCamelCase = tokenizer __lowerCamelCase = MODEL_MODES[mode] if model is None: __lowerCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , 
cache_dir=__UpperCAmelCase , ) else: __lowerCamelCase = model def lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __lowerCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __lowerCamelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model __lowerCamelCase = ['''bias''', '''LayerNorm.weight'''] __lowerCamelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: __lowerCamelCase = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: __lowerCamelCase = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __lowerCamelCase = optimizer __lowerCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.validation_end(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __lowerCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if stage == "test": __lowerCamelCase = len(self.test_dataloader().dataset ) else: __lowerCamelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) __lowerCamelCase = len(self.train_dataloader().dataset ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def lowerCamelCase ( self ): '''simple docstring''' return self.train_loader def lowerCamelCase ( self ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.output_dir.joinpath('''best_tfmr''' ) __lowerCamelCase = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def 
lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=__UpperCAmelCase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(__UpperCAmelCase ).parent / '''test_run''' / '''cache''' ) , type=__UpperCAmelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=__UpperCAmelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=__UpperCAmelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=__UpperCAmelCase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=__UpperCAmelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=__UpperCAmelCase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=__UpperCAmelCase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__UpperCAmelCase ) parser.add_argument('''--train_batch_size''' , default=32 , type=__UpperCAmelCase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=__UpperCAmelCase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class __lowerCAmelCase ( pl.Callback ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class __lowerCAmelCase ( pl.Callback ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCAmelCase ( pl.Callback ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = trainer.lr_schedulers[0]['''scheduler'''] __lowerCamelCase = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) __lowerCamelCase = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) __lowerCamelCase = trainer.callback_metrics # Log and save results to file __lowerCamelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(__UpperCAmelCase , '''w''' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Dict ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' ,default=str(Path(_UpperCamelCase ).parent / '''test_run''' / '''model_checkpoints''' ) ,type=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,) parser.add_argument( '''--fp16''' ,action='''store_true''' ,help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' ,) parser.add_argument( '''--fp16_opt_level''' ,type=_UpperCamelCase ,default='''O2''' ,help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) ,) parser.add_argument('''--n_tpu_cores''' ,dest='''tpu_cores''' ,type=_UpperCamelCase ) parser.add_argument('''--max_grad_norm''' ,dest='''gradient_clip_val''' ,default=1.0 ,type=_UpperCamelCase ,help='''Max gradient norm''' ) parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' ,action='''store_true''' ,help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' ,dest='''accumulate_grad_batches''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,) parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 ,help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' ,default=str(Path(_UpperCamelCase ).parent / '''test_run''' / '''dummy-train-data''' ) ,type=_UpperCamelCase ,help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' ,) def a__ ( _UpperCamelCase : BaseTransformer ,_UpperCamelCase : argparse.Namespace ,_UpperCamelCase : int=None ,_UpperCamelCase : int=True ,_UpperCamelCase : str=[] ,_UpperCamelCase : Dict=None ,_UpperCamelCase : Dict=None ,**_UpperCamelCase : Tuple ,): pl.seed_everything(args.seed ) # init model __lowerCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_UpperCamelCase ) # add custom checkpoints if checkpoint_callback is None: __lowerCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir ,prefix='''checkpoint''' ,monitor='''val_loss''' ,mode='''min''' ,save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_UpperCamelCase ) if logging_callback is None: __lowerCamelCase = LoggingCallback() __lowerCamelCase = {} if args.fpaa: __lowerCamelCase = 16 if args.gpus > 1: __lowerCamelCase = '''auto''' __lowerCamelCase = '''ddp''' __lowerCamelCase = args.accumulate_grad_batches __lowerCamelCase = None __lowerCamelCase = '''auto''' __lowerCamelCase = pl.Trainer.from_argparse_args( _UpperCamelCase ,weights_summary=_UpperCamelCase ,callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,logger=_UpperCamelCase ,val_check_interval=1 ,num_sanity_val_steps=2 ,**_UpperCamelCase ,) if args.do_train: trainer.fit(_UpperCamelCase ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
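# --- Illustrative usage (not part of the module above) ----------------------
# A minimal, pure-PyTorch sketch of the weight-decay grouping used in
# `configure_optimizers`: parameters whose names contain "bias" or
# "LayerNorm.weight" are excluded from decay. The model and hyper-parameters
# here are illustrative only.
import torch
from torch import nn
from torch.optim import AdamW

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = TinyModel()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(grouped_parameters, lr=5e-5)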
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
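# --- Illustrative usage (not part of the script above) ----------------------
# A minimal sketch of the `find_executable_batch_size` retry pattern,
# assuming the accelerate decorator halves the batch size whenever the
# wrapped function raises a CUDA out-of-memory error.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def run_training(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 32:  # stand-in for an actual out-of-memory condition
        raise RuntimeError("CUDA out of memory.")
    return batch_size

# run_training()  # retries 128 -> 64 -> 32, then succeeds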
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        # verify the expected logits shape and the first few values
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
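# --- Illustrative alternative (a sketch, not part of the test above) --------
# The same checkpoint can be queried through the higher-level pipeline API;
# the checkpoint name is taken from the test, everything else is illustrative.
from transformers import pipeline

document_classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
# predictions = document_classifier("path/to/scanned_document.png")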
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=125 , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowerCamelCase = [F"""<extra_id_{i}>""" for i in range(__UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __lowerCamelCase = len(set(filter(lambda __UpperCAmelCase : bool('''extra_id''' in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the''' ''' extra_ids tokens''' ) __lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token __lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token __lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token super().__init__( eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) __lowerCamelCase = extra_ids __lowerCamelCase = 2**8 # utf is 8 bits # define special tokens dict __lowerCamelCase = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __lowerCamelCase = len(self.special_tokens_encoder ) __lowerCamelCase = len(__UpperCAmelCase ) for i, token in enumerate(__UpperCAmelCase ): __lowerCamelCase = self.vocab_size + i - n __lowerCamelCase = {v: k for k, v in self.special_tokens_encoder.items()} @property def lowerCamelCase ( self ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__UpperCAmelCase )) + [1] return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if len(__UpperCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = self._add_eos_if_not_present(__UpperCAmelCase ) if token_ids_a is None: return token_ids_a else: __lowerCamelCase = self._add_eos_if_not_present(__UpperCAmelCase ) return token_ids_a + token_ids_a def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = [chr(__UpperCAmelCase ) for i in text.encode('''utf-8''' )] return tokens def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.special_tokens_encoder: __lowerCamelCase = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __lowerCamelCase = self.added_tokens_encoder[token] elif len(__UpperCAmelCase ) != 1: __lowerCamelCase = self.unk_token_id else: __lowerCamelCase = ord(__UpperCAmelCase ) + self._num_special_tokens return token_id def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' if index in self.special_tokens_decoder: __lowerCamelCase = self.special_tokens_decoder[index] else: __lowerCamelCase = chr(index - self._num_special_tokens ) return token def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = b'''''' for token in tokens: if token in self.special_tokens_decoder: __lowerCamelCase = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.added_tokens_decoder: __lowerCamelCase = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.special_tokens_encoder: __lowerCamelCase = token.encode('''utf-8''' ) elif token in self.added_tokens_encoder: __lowerCamelCase = token.encode('''utf-8''' ) else: __lowerCamelCase = bytes([ord(__UpperCAmelCase )] ) bstring += tok_string __lowerCamelCase = bstring.decode('''utf-8''' , errors='''ignore''' ) return string def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' return ()
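# --- Illustrative usage (not part of the tokenizer above) -------------------
# A pure-Python sketch of the byte-level mapping the tokenizer implements:
# each UTF-8 byte becomes one id, offset by the number of special tokens
# (3 here: pad=0, eos=1, unk=2, matching `special_tokens_encoder`).
NUM_SPECIAL_TOKENS = 3

def byte_encode(text):
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def byte_decode(ids):
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")

ids = byte_encode("héllo")
print(ids)               # one id per UTF-8 byte ('é' contributes two)
print(byte_decode(ids))  # 'héllo'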
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
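# Illustrative sketch (not part of the test file above): a minimal end-to-end
# inference pass distilled from the slow integration test. It assumes the
# facebook/timesformer-base-finetuned-k400 checkpoint named in the test and
# substitutes random frames for the hub-hosted spaghetti video so the snippet
# stays self-contained.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
model.eval()

# Eight random RGB frames standing in for a real clip.
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 400) for the Kinetics-400 head

print(model.config.id2label[logits.argmax(-1).item()])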
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy a_ = logging.get_logger(__name__) a_ = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } a_ = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } a_ = { """jukebox""": 512, } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_LYRIC_TOKENS_SIZES lowerCAmelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=["v3", "v2", "v2"] , __UpperCAmelCase=512 , __UpperCAmelCase=5 , __UpperCAmelCase="<|endoftext|>" , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token super().__init__( unk_token=__UpperCAmelCase , n_genres=__UpperCAmelCase , version=__UpperCAmelCase , max_n_lyric_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) __lowerCamelCase = version __lowerCamelCase = max_n_lyric_tokens __lowerCamelCase = n_genres with open(__UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(__UpperCAmelCase ) with open(__UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(__UpperCAmelCase ) with open(__UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(__UpperCAmelCase ) __lowerCamelCase = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: __lowerCamelCase = oov.replace(r'''\-\'''' , r'''\-+\'''' ) __lowerCamelCase = regex.compile(__UpperCAmelCase ) __lowerCamelCase = {v: k for k, v in self.artists_encoder.items()} __lowerCamelCase = {v: k for k, v in self.genres_encoder.items()} __lowerCamelCase = {v: k for k, v in self.lyrics_encoder.items()} @property def lowerCamelCase ( self ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def lowerCamelCase ( self ): '''simple docstring''' return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = [self.artists_encoder.get(__UpperCAmelCase , 0 ) for artist in list_artists] for genres in range(len(__UpperCAmelCase ) ): __lowerCamelCase = [self.genres_encoder.get(__UpperCAmelCase , 0 ) for genre in list_genres[genres]] __lowerCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) __lowerCamelCase = [[self.lyrics_encoder.get(__UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return list(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self.prepare_for_tokenization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = self._tokenize(__UpperCAmelCase ) return artist, genre, lyrics def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": __lowerCamelCase = artists[idx].lower() __lowerCamelCase = [genres[idx].lower()] else: __lowerCamelCase = self._normalize(artists[idx] ) + '''.v2''' __lowerCamelCase = [ self._normalize(__UpperCAmelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": __lowerCamelCase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) __lowerCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' __lowerCamelCase = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase ) )} __lowerCamelCase = 0 __lowerCamelCase = len(__UpperCAmelCase ) + 1 __lowerCamelCase = self.vocab __lowerCamelCase = {v: k for k, v in self.vocab.items()} __lowerCamelCase = '''''' else: __lowerCamelCase = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) __lowerCamelCase = self._run_strip_accents(__UpperCAmelCase ) __lowerCamelCase = lyrics.replace('''\\''' , '''\n''' ) __lowerCamelCase = self.out_of_vocab.sub('''''' , __UpperCAmelCase ), [], [] return artists, genres, lyrics def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = unicodedata.normalize('''NFD''' , __UpperCAmelCase ) __lowerCamelCase = [] for char in text: __lowerCamelCase = unicodedata.category(__UpperCAmelCase ) if cat == "Mn": continue output.append(__UpperCAmelCase ) return "".join(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = ( [chr(__UpperCAmelCase ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )] + [chr(__UpperCAmelCase ) for i in range(ord('''A''' 
) , ord('''Z''' ) + 1 )] + [chr(__UpperCAmelCase ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )] + ['''.'''] ) __lowerCamelCase = frozenset(__UpperCAmelCase ) __lowerCamelCase = re.compile(r'''_+''' ) __lowerCamelCase = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] ) __lowerCamelCase = pattern.sub('''_''' , __UpperCAmelCase ).strip('''_''' ) return text def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return " ".join(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' # Convert to TensorType if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = TensorType(__UpperCAmelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf __lowerCamelCase = tf.constant __lowerCamelCase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch __lowerCamelCase = torch.tensor __lowerCamelCase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 __lowerCamelCase = jnp.array __lowerCamelCase = _is_jax else: __lowerCamelCase = np.asarray __lowerCamelCase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: __lowerCamelCase = [inputs] if not is_tensor(__UpperCAmelCase ): __lowerCamelCase = as_tensor(__UpperCAmelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase="pt" ): '''simple docstring''' __lowerCamelCase = [0, 0, 0] __lowerCamelCase = [artist] * len(self.version ) __lowerCamelCase = [genres] * len(self.version ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self.tokenize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self._convert_token_to_id(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = [-INFINITY] * len(full_tokens[-1] ) __lowerCamelCase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCAmelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCAmelCase ) ) __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + 
VOCAB_FILES_NAMES['''genres_file'''] ) with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCAmelCase ) ) __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCAmelCase ) ) return (artists_file, genres_file, lyrics_file) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.artists_decoder.get(__UpperCAmelCase ) __lowerCamelCase = [self.genres_decoder.get(__UpperCAmelCase ) for genre in genres_index] __lowerCamelCase = [self.lyrics_decoder.get(__UpperCAmelCase ) for character in lyric_index] return artist, genres, lyrics
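# Illustrative standalone rendering of the artist/genre normalization used by
# the tokenizer above: lowercase, map every character outside [a-z0-9.] to an
# underscore, then collapse runs of underscores and strip them from the ends.
# The function name is my own; the logic mirrors the `_normalize` method.
import re


def normalize_label(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")


print(normalize_label("Daft Punk"))        # daft_punk
print(normalize_label("Hip-Hop / Rap!!"))  # hip_hop_rap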
def is_automorphic_number(number: int) -> bool:
    # An automorphic number is one whose square ends in the number itself,
    # e.g. 5 -> 25, 76 -> 5776. The loop compares the trailing digits of
    # `number` and `number**2` one at a time.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
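# Quick demonstration of the automorphic-number check defined above
# (n is automorphic when n**2 ends in the digits of n):
for n in (0, 1, 5, 6, 7, 25, 76, 376):
    print(n, is_automorphic_number(n))
# 0, 1, 5, 6, 25, 76 and 376 print True; 7 prints False (49 does not end in 7).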
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ): '''simple docstring''' # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad def lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if not batched: __lowerCamelCase = image_inputs[0] if isinstance(__UpperCAmelCase , Image.Image ): __lowerCamelCase ,__lowerCamelCase = image.size else: __lowerCamelCase ,__lowerCamelCase = image.shape[1], image.shape[2] if w < h: __lowerCamelCase = int(self.size['''shortest_edge'''] * h / w ) __lowerCamelCase = self.size['''shortest_edge'''] elif w > h: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = self.size['''shortest_edge'''] else: __lowerCamelCase = [] for image in image_inputs: __lowerCamelCase ,__lowerCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCamelCase = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0] __lowerCamelCase = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DeformableDetrImageProcessingTester(self ) @property def lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) ) 
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) __lowerCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) __lowerCamelCase = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase ( self ): '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase ( self ): '''simple docstring''' # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: 
self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values __lowerCamelCase ,__lowerCamelCase = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCamelCase ( self ): '''simple docstring''' # prepare image and target __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them __lowerCamelCase = DeformableDetrImageProcessor() __lowerCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area __lowerCamelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) ) # verify class_labels __lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) ) # verify orig_size __lowerCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) ) # verify size __lowerCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' # prepare image, target and masks_path __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} __lowerCamelCase = 
pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __lowerCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' ) __lowerCamelCase = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area __lowerCamelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) ) # verify class_labels __lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) ) # verify masks __lowerCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase ) # verify orig_size __lowerCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) ) # verify size __lowerCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
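# Compact sketch of the detection-annotation path exercised by the slow test
# above, using a tiny hand-written COCO-style target instead of the fixture
# files. The annotation keys follow the standard COCO detection schema, which
# is what the fixture file contains.
import numpy as np
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
target = {
    "image_id": 1,
    "annotations": [
        {
            "id": 1,
            "image_id": 1,
            "category_id": 17,
            "bbox": [10.0, 20.0, 100.0, 50.0],  # COCO xywh in pixels
            "area": 5000.0,
            "iscrowd": 0,
        }
    ],
}

processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, 800, 1066) after shortest-edge resizing
print(encoding["labels"][0]["boxes"])  # boxes renormalized to (cx, cy, w, h)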
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
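# The core call pattern from the tests above, reduced to a single forward
# pass. Flax modules are stateless, so parameters are passed explicitly to
# `apply`. Assumes the CompVis/stable-diffusion-v1-4 checkpoint named in the
# tests and zero tensors in place of the hub-hosted fixture arrays.
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.float32
)

latents = jnp.zeros((1, 4, 64, 64))              # (batch, channels, height, width)
encoder_hidden_states = jnp.zeros((1, 77, 768))  # CLIP text embeddings
timestep = jnp.array([4], dtype=jnp.int32)

sample = model.apply(
    {"params": params}, latents, timestep, encoder_hidden_states=encoder_hidden_states
).sample
assert sample.shape == latents.shape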
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
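# The `_LazyModule` indirection above defers importing torch-heavy submodules
# until an attribute is actually used. A minimal standalone sketch of the same
# idea via PEP 562 module-level __getattr__ (save as lazy_mod.py and access
# e.g. lazy_mod.sqrt); stdlib modules stand in for the real submodules.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the owning module only on first access, then forward the lookup.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")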
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
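# Minimal sketch of the 4-bit loading path these tests exercise. Assumes a
# CUDA GPU plus the `bitsandbytes` and `accelerate` packages, and uses the
# small bigscience/bloom-560m checkpoint named in the tests above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quantization_config, device_map="auto"
)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))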
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a_ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""input_features""", """attention_mask"""] def __init__( self , __UpperCAmelCase=80 , __UpperCAmelCase=16000 , __UpperCAmelCase=80 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase ) __lowerCamelCase = num_mel_bins __lowerCamelCase = do_ceptral_normalize __lowerCamelCase = normalize_means __lowerCamelCase = normalize_vars __lowerCamelCase = True def lowerCamelCase ( self , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).unsqueeze(0 ) __lowerCamelCase = ta_kaldi.fbank(__UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = 0.0 , ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __lowerCamelCase = x[:input_length].mean(axis=0 ) __lowerCamelCase = np.subtract(__UpperCAmelCase , __UpperCAmelCase ) if normalize_vars: __lowerCamelCase = x[:input_length].std(axis=0 ) __lowerCamelCase = np.divide(__UpperCAmelCase , __UpperCAmelCase ) if input_length < x.shape[0]: __lowerCamelCase = padding_value # make sure array is in float32 __lowerCamelCase = x.astype(np.floataa ) return x def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__UpperCAmelCase , __UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__UpperCAmelCase , __UpperCAmelCase ) ] def __call__( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __lowerCamelCase = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __lowerCamelCase = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ): __lowerCamelCase = np.asarray(__UpperCAmelCase , dtype=np.floataa ) elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCamelCase = [raw_speech] # extract fbank features __lowerCamelCase = [self._extract_fbank_features(__UpperCAmelCase ) for waveform in raw_speech] # convert into correct format for padding __lowerCamelCase = BatchFeature({'''input_features''': features} ) __lowerCamelCase = self.pad( __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) # make sure list is in array format __lowerCamelCase = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , __UpperCAmelCase ): __lowerCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in input_features] __lowerCamelCase = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __lowerCamelCase = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __lowerCamelCase = ( np.array(__UpperCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __lowerCamelCase = self.normalize( padded_inputs['''input_features'''] , attention_mask=__UpperCAmelCase ) if return_tensors is not None: __lowerCamelCase = padded_inputs.convert_to_tensors(__UpperCAmelCase ) return padded_inputs
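# Standalone illustration of the utterance-level CMVN performed above: each
# feature dimension is normalized by the mean/std computed over the valid
# (unpadded) frames only, and padded frames are reset to the padding value.
# The function name is my own; the arithmetic mirrors `utterance_cmvn`.
import numpy as np


def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
    if normalize_means:
        x = x - x[:input_length].mean(axis=0)
    if normalize_vars:
        x = x / x[:input_length].std(axis=0)
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x.astype(np.float32)


features = np.random.randn(12, 80).astype(np.float32)  # 12 frames, 80 mel bins
normalized = utterance_cmvn(features, input_length=10)
print(normalized[:10, :5].mean(axis=0))  # ~0 per mel bin over the valid frames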
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
330
1
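# A minimal sketch of the vertical tile blending used by the autoencoder above:
# the bottom rows of tile `a` are cross-faded into the top rows of tile `b`
# with a linear ramp. The 4-D (batch, channel, height, width) layout and the
# function name are illustrative assumptions, not part of the record itself.
import torch

def blend_vertical(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent  # 0 at the row taken from `a`, approaching 1 deep inside `b`
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
    return b

a = torch.zeros(1, 4, 8, 8)
b = torch.ones(1, 4, 8, 8)
out = blend_vertical(a, b, blend_extent=4)
# out[:, :, 0:4, :] now ramps through 0.00, 0.25, 0.50, 0.75 before pure `b` rows,
# which is what hides the seams between independently encoded/decoded tiles.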
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 32 , __UpperCAmelCase=PILImageResampling.BILINEAR , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize __lowerCamelCase = do_rescale __lowerCamelCase = size_divisor __lowerCamelCase = resample super().__init__(**__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = get_image_size(__UpperCAmelCase ) # Rounds the height and width down to the closest multiple of size_divisor __lowerCamelCase = height // size_divisor * size_divisor __lowerCamelCase = width // size_divisor * size_divisor __lowerCamelCase = resize(__UpperCAmelCase , (new_h, new_w) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = size_divisor if size_divisor is not None else self.size_divisor __lowerCamelCase = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for img in images] if do_resize: __lowerCamelCase = [self.resize(__UpperCAmelCase , size_divisor=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(__UpperCAmelCase , scale=1 / 255 ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
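# The resize step above rounds height and width down to the nearest multiple of
# `size_divisor` so feature maps divide evenly through the network's stride.
# A minimal sketch of just that rounding; the example numbers are made up.
def round_down(height: int, width: int, size_divisor: int = 32) -> tuple:
    # Floor-divide then multiply back: drops at most size_divisor - 1 pixels per side.
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

print(round_down(481, 639))  # (480, 608)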
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
330
1
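# The conversion above renames TF variable paths with ordered substring
# replacements, so later patterns operate on the output of earlier ones. A
# sketch with a subset of the patterns from the record; the example key is
# hypothetical. Note also why dense/query/key/value arrays get `v.T` in the
# record: TF stores dense kernels as (in_features, out_features) while torch
# nn.Linear weights are (out_features, in_features).
PATTERNS = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]

def rename_key(k: str) -> str:
    for tf_name, hf_name in PATTERNS:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_key("pegasus/decoder/layer_0/self/query/kernel"))
# -> model.decoder.layers.0.self.query.weight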
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) a_ = """bert-base-cased""" a_ = """fp16""" a_ = """bf16""" a_ = [FPaa, BFaa] @require_fsdp @require_cuda class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() __lowerCamelCase = dict( ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , ) def lowerCamelCase ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__UpperCAmelCase ): __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = F"""{i + 1}""" __lowerCamelCase = strategy with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def lowerCamelCase ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__UpperCAmelCase ): __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = prefetch_policy with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def lowerCamelCase ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__UpperCAmelCase ): __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = state_dict_type with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModel.from_pretrained(__UpperCAmelCase ) for policy in FSDP_AUTO_WRAP_POLICY: __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = policy if policy == "TRANSFORMER_BASED_WRAP": __lowerCamelCase = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": __lowerCamelCase = '''2000''' with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__UpperCAmelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = '''TRANSFORMER_BASED_WRAP''' __lowerCamelCase = '''T5Layer''' with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() with 
self.assertRaises(__UpperCAmelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__UpperCAmelCase ) self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) ) __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = '''SIZE_BASED_WRAP''' __lowerCamelCase = '''0''' with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__UpperCAmelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def lowerCamelCase ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = mp_dtype with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = Accelerator() if mp_dtype == "fp16": __lowerCamelCase = torch.floataa elif mp_dtype == "bf16": __lowerCamelCase = torch.bfloataa __lowerCamelCase = MixedPrecision(param_dtype=__UpperCAmelCase , reduce_dtype=__UpperCAmelCase , buffer_dtype=__UpperCAmelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __UpperCAmelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __UpperCAmelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: __lowerCamelCase = self.dist_env.copy() __lowerCamelCase = str(__UpperCAmelCase ).lower() with mockenv_context(**__UpperCAmelCase ): __lowerCamelCase = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__UpperCAmelCase ) ) @require_fsdp @require_multi_gpu @slow class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() __lowerCamelCase = 0.82 __lowerCamelCase = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] __lowerCamelCase = { '''multi_gpu_fp16''': 3200, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000, '''fsdp_full_shard_transformer_based_wrap_fp16''': 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } __lowerCamelCase = 160 __lowerCamelCase = 160 __lowerCamelCase = inspect.getfile(accelerate.test_utils ) __lowerCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.path.join(self.test_scripts_folder , '''test_performance.py''' ) __lowerCamelCase = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: __lowerCamelCase = cmd.copy() for i, strategy in enumerate(__UpperCAmelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' ) __lowerCamelCase = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__UpperCAmelCase ): __lowerCamelCase = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue __lowerCamelCase = len(__UpperCAmelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: __lowerCamelCase = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) __lowerCamelCase = cmd_config[:-1] __lowerCamelCase = os.path.join(self.tmpdir , '''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' ) __lowerCamelCase = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): __lowerCamelCase = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__UpperCAmelCase ): if strategy.lower() in spec: 
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
330
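# The tests above drive FSDP configuration purely through environment variables
# inside a mocked-env context. A minimal sketch of what such a helper does;
# this is an illustration, not the actual mockenv_context implementation.
import os
from contextlib import contextmanager

@contextmanager
def mock_env(**env):
    old = {k: os.environ.get(k) for k in env}  # remember previous values
    os.environ.update({k: str(v) for k, v in env.items()})
    try:
        yield
    finally:
        for k, v in old.items():  # restore on exit, even on error
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

with mock_env(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1"):
    assert os.environ["ACCELERATE_USE_FSDP"] == "true"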
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
330
1
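# The pipeline above keeps a conversation as three pieces of state: past user
# inputs, generated responses, and one pending input. A stripped-down sketch of
# that life cycle; the class and method names here are illustrative.
class MiniConversation:
    def __init__(self):
        self.past_user_inputs, self.generated_responses = [], []
        self.new_user_input = None

    def add_user_input(self, text):
        self.new_user_input = text  # pending until the model answers

    def mark_processed(self):
        self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, text):
        self.generated_responses.append(text)

    def iter_texts(self):
        # Yield (is_user, text) pairs in chronological order.
        for user, bot in zip(self.past_user_inputs, self.generated_responses):
            yield True, user
            yield False, bot
        if self.new_user_input:
            yield True, self.new_user_input

c = MiniConversation()
c.add_user_input("Hi there")
c.mark_processed()
c.append_response("Hello!")
print(list(c.iter_texts()))  # [(True, 'Hi there'), (False, 'Hello!')]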
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ): __lowerCamelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCamelCase )] ) __lowerCamelCase = np.array(_UpperCamelCase ) __lowerCamelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() ,_UpperCamelCase ) ) ,x.transpose() ) ,_UpperCamelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ): __lowerCamelCase = (1, 2, 1) __lowerCamelCase = (1, 1, 0, 7) __lowerCamelCase = SARIMAX( _UpperCamelCase ,exog=_UpperCamelCase ,order=_UpperCamelCase ,seasonal_order=_UpperCamelCase ) __lowerCamelCase = model.fit(disp=_UpperCamelCase ,maxiter=6_00 ,method='''nm''' ) __lowerCamelCase = model_fit.predict(1 ,len(_UpperCamelCase ) ,exog=[test_match] ) return result[0] def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ): __lowerCamelCase = SVR(kernel='''rbf''' ,C=1 ,gamma=0.1 ,epsilon=0.1 ) regressor.fit(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = regressor.predict(_UpperCamelCase ) return y_pred[0] def a__ ( _UpperCamelCase : list ): train_user.sort() __lowerCamelCase = np.percentile(_UpperCamelCase ,25 ) __lowerCamelCase = np.percentile(_UpperCamelCase ,75 ) __lowerCamelCase = qa - qa __lowerCamelCase = qa - (iqr * 0.1) return low_lim def a__ ( _UpperCamelCase : list ,_UpperCamelCase : float ): __lowerCamelCase = 0 __lowerCamelCase = 0 for i in list_vote: if i > actual_result: __lowerCamelCase = not_safe + 1 else: if abs(abs(_UpperCamelCase ) - abs(_UpperCamelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) a_ = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]] a_ = pd.DataFrame( data_input, columns=["""total_user""", """total_even""", """days"""] ) a_ = Normalizer().fit_transform(data_input_df.values) # split data a_ = normalize_df[:, 2].tolist() a_ = normalize_df[:, 0].tolist() a_ = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) a_ = normalize_df[:, [1, 2]].tolist() a_ = x[: len(x) - 1] a_ = x[len(x) - 1 :] # for linear regression & sarimax a_ = total_date[: len(total_date) - 1] a_ = total_user[: len(total_user) - 1] a_ = total_match[: len(total_match) - 1] a_ = total_date[len(total_date) - 1 :] a_ = total_user[len(total_user) - 1 :] a_ = total_match[len(total_match) - 1 :] # voting system with forecasting a_ = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data a_ = """""" if data_safety_checker(res_vote, tst_user) else """not """ print("""Today's data is {not_str}safe.""")
330
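# The linear-regression leg of the vote above solves the normal equations
# beta = (X^T X)^{-1} X^T y directly. A self-contained sketch with made-up
# numbers; note that the standard prediction is the dot product x . beta,
# whereas the record's closing expression mixes addition and multiplication.
import numpy as np

X = np.array([[1.0, 1, 2], [1.0, 2, 3], [1.0, 3, 5], [1.0, 4, 4]])  # intercept column first
y = np.array([2.0, 3.0, 5.0, 6.0])

beta = np.linalg.inv(X.T @ X) @ X.T @ y
pred = np.array([1.0, 5, 6]) @ beta
print(beta, pred)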
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
330
1
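# Loading the TF checkpoint above boils down to listing variables and
# materialising each one while skipping optimiser state. A minimal sketch using
# the same tf.train APIs; the checkpoint path is a placeholder.
import tensorflow as tf

def tf_ckpt_to_dict(ckpt_path, ignore=("Adafactor", "global_step")):
    weights = {}
    for name, shape in tf.train.list_variables(ckpt_path):
        if any(pat in name for pat in ignore):
            continue  # optimiser slots and step counters are not model weights
        weights[name] = tf.train.load_variable(ckpt_path, name)
    return weights

# weights = tf_ckpt_to_dict("./ckpt/aeslc/model.ckpt-32000")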
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a_ = logging.get_logger(__name__) a_ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} a_ = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } a_ = { """abeja/gpt-neox-japanese-2.7b""": 2_048, } def a__ ( _UpperCamelCase : int ,_UpperCamelCase : List[Any] ): with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = collections.OrderedDict() __lowerCamelCase = collections.OrderedDict() __lowerCamelCase = collections.OrderedDict() with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ) as f: __lowerCamelCase = f.readlines() __lowerCamelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token] for idx, b in enumerate(_UpperCamelCase ): __lowerCamelCase = b __lowerCamelCase = idx for wd in b: __lowerCamelCase = idx return vocab, raw_vocab, ids_to_tokens, emoji class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|startoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , do_clean_text=__UpperCAmelCase , **__UpperCAmelCase , ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( F"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) __lowerCamelCase = do_clean_text __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = load_vocab_and_emoji(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def lowerCamelCase ( self ): '''simple docstring''' # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def lowerCamelCase ( self ): '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = ''''''.join(__UpperCAmelCase ).strip() return out_string def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __lowerCamelCase = input_ids[-self.model_max_length :] return input_ids def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = 0 if os.path.isdir(__UpperCAmelCase ): __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] ) else: __lowerCamelCase = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file'''] ) with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''','''.join(__UpperCAmelCase ) + '''\n''' ) index += 1 with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: json.dump(self.emoji , __UpperCAmelCase ) return vocab_file, emoji_file class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = vocab # same as swe __lowerCamelCase = ids_to_tokens # same as bpe __lowerCamelCase = emoji __lowerCamelCase = np.max([len(__UpperCAmelCase ) for w in self.vocab.keys()] ) __lowerCamelCase = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' ) __lowerCamelCase = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' ) __lowerCamelCase = 
re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' ) __lowerCamelCase = re.compile( r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) __lowerCamelCase = re.compile( r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) __lowerCamelCase = re.compile( r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' ) __lowerCamelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿''' __lowerCamelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟''' __lowerCamelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} ) def __len__( self ): '''simple docstring''' return len(self.ids_to_tokens ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.content_repattera.sub('''<URL>''' , __UpperCAmelCase ) __lowerCamelCase = self.content_repattera.sub('''<EMAIL>''' , __UpperCAmelCase ) __lowerCamelCase = self.content_repattera.sub('''<TEL>''' , __UpperCAmelCase ) __lowerCamelCase = self.content_repattera.sub('''<DATE>''' , __UpperCAmelCase ) __lowerCamelCase = self.content_repattera.sub('''<DATE>''' , __UpperCAmelCase ) __lowerCamelCase = self.content_repattera.sub('''<PRICE>''' , __UpperCAmelCase ) __lowerCamelCase = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowerCamelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' ) return content def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = text.replace(''' ''' , '''<SP>''' ) __lowerCamelCase = text.replace(''' ''' , '''<SP>''' ) __lowerCamelCase = text.replace('''\r\n''' , '''<BR>''' ) __lowerCamelCase = text.replace('''\n''' , '''<BR>''' ) __lowerCamelCase = text.replace('''\r''' , '''<BR>''' ) __lowerCamelCase = text.replace('''\t''' , '''<TAB>''' ) __lowerCamelCase = text.replace('''—''' , '''ー''' ) __lowerCamelCase = text.replace('''−''' , '''ー''' ) for k, v in self.emoji["emoji"].items(): if k in text: __lowerCamelCase = text.replace(__UpperCAmelCase , __UpperCAmelCase ) if clean: __lowerCamelCase = self.clean_text(__UpperCAmelCase ) def check_simbol(__UpperCAmelCase ): __lowerCamelCase = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2: __lowerCamelCase = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc_2a1 and c <= 0Xc_2bf) or (c >= 0Xc_780 and c <= 0Xc_783) or (c >= 0Xc_ab9 and c <= 0Xc_bbf) or (c >= 0Xc_c80 and c <= 0Xc_da2) ): return True return False def checkuae(__UpperCAmelCase ): __lowerCamelCase = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3: __lowerCamelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe28_080 and c <= 0Xe2b_07f: return True return False __lowerCamelCase = 0 __lowerCamelCase = [] while pos < len(__UpperCAmelCase ): __lowerCamelCase = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3 __lowerCamelCase = [] # (token_id, token, pos) for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ): __lowerCamelCase = text[pos:e] if wd in self.vocab: if wd[0] == 
"<" and len(__UpperCAmelCase ) > 2: __lowerCamelCase = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__UpperCAmelCase ) > 0: # the smallest token_id is adopted __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[0] )[0] result.append(__UpperCAmelCase ) __lowerCamelCase = e else: __lowerCamelCase = pos + 1 __lowerCamelCase = text[pos:end] if check_simbol(__UpperCAmelCase ): result.append('''<KIGOU>''' ) elif checkuae(__UpperCAmelCase ): result.append('''<U2000U2BFF>''' ) else: for i in wd.encode('''utf-8''' ): result.append('''<|byte%d|>''' % i ) __lowerCamelCase = end return result def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="\n" ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode('''utf-8''' , errors='''replace''' ) ) __lowerCamelCase = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['''emoji_inv'''][word] ) elif word == "<SP>": words.append(''' ''' ) elif word == "<BR>": words.append(__UpperCAmelCase ) elif word == "<TAB>": words.append('''\t''' ) elif word == "<BLOCK>": words.append('''▀''' ) elif word == "<KIGOU>": words.append('''ǀ''' ) elif word == "<U2000U2BFF>": words.append('''‖''' ) else: words.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode('''utf-8''' , errors='''replace''' ) ) __lowerCamelCase = ''''''.join(__UpperCAmelCase ) return text
330
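# The tokenizer above scans left to right, collects every vocabulary match up
# to a maximum length at the current position, prefers the candidate with the
# smallest token id, and falls back to UTF-8 byte tokens. A condensed sketch of
# that inner loop; the toy vocabulary is made up.
def tokenize(text, vocab, maxlen):
    pos, result = 0, []
    while pos < len(text):
        candidates = []  # (token_id, surface_form, end_position)
        for end in range(min(len(text), pos + maxlen), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                candidates.append((vocab[piece], piece, end))
        if candidates:
            _, piece, end = sorted(candidates)[0]  # smallest token id wins
            result.append(piece)
            pos = end
        else:
            # out-of-vocabulary character: emit its UTF-8 bytes
            result.extend(f"<|byte{b}|>" for b in text[pos].encode("utf-8"))
            pos += 1
    return result

print(tokenize("abab", {"a": 5, "ab": 2, "b": 7}, maxlen=2))  # ['ab', 'ab']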
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
1
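# The mapping above uses "*" as a per-layer wildcard; the layer index is
# recovered from the fairseq name by splitting on the matched key. A rough
# sketch of that expansion; the example strings are illustrative.
def expand_mapped_key(name: str, key: str, mapped_key: str) -> str:
    if "*" in mapped_key:
        # "encoder.layers.3.self_attn.k_proj.weight" -> prefix "encoder.layers.3."
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key

print(expand_mapped_key(
    "encoder.layers.3.self_attn.k_proj.weight",
    "self_attn.k_proj",
    "encoder.layers.*.attention.k_proj",
))  # encoder.layers.3.attention.k_proj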
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


a_ = {
    """configuration_rag""": ["""RagConfig"""],
    """retrieval_rag""": ["""RagRetriever"""],
    """tokenization_rag""": ["""RagTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        """RagModel""",
        """RagPreTrainedModel""",
        """RagSequenceForGeneration""",
        """RagTokenForGeneration""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        """TFRagModel""",
        """TFRagPreTrainedModel""",
        """TFRagSequenceForGeneration""",
        """TFRagTokenForGeneration""",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
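# A short usage sketch for the depth-estimation pipeline above; the image URL
# is a placeholder and "Intel/dpt-large" is one plausible depth checkpoint,
# not the only option:
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")        # PIL image of the depth map
#   print(result["predicted_depth"].shape)   # raw torch tensor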
330
1
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
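# Reading the dump back is the mirror image of the script above; a minimal
# sketch, assuming the default bert-base-uncased prefix was used:
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#       sequences = pickle.load(fp)  # list of np.uint16 arrays of token ids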
330
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
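# A minimal sketch of driving the processor above; "openai/clip-vit-base-patch32"
# is the canonical CLIP checkpoint and the PIL image here is synthetic:
#
#   from PIL import Image
#   from transformers import CLIPImageProcessor
#
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.new("RGB", (640, 480))
#   batch = processor(images=image, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])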
330
1
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
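# The "magnitude" branch above keeps the largest-magnitude weights; a small
# self-contained sketch of that idea (not the emmental implementation, and
# the helper name is made up for illustration):
import torch


def magnitude_mask(tensor: torch.Tensor, remaining: float) -> torch.Tensor:
    """Return a 0/1 mask keeping roughly the `remaining` fraction of largest |w|."""
    k = max(1, int(remaining * tensor.numel()))
    # value of the k-th largest magnitude, used as the keep threshold
    threshold = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= threshold).to(tensor.dtype)


if __name__ == "__main__":
    w = torch.randn(4, 4)
    print((w * magnitude_mask(w, 0.25)).count_nonzero())  # ~4 of 16 survive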
330
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __UpperCAmelCase : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
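# A usage sketch for the Kruskal implementation above, assuming the intended
# class name GraphUndirectedWeighted (the MST builder itself instantiates
# DisjointSetTree and GraphUndirectedWeighted by those names). The MST method
# name is assumed for illustration:
#
#   graph = GraphUndirectedWeighted[int]()
#   graph.add_edge(1, 2, 1)
#   graph.add_edge(2, 3, 2)
#   graph.add_edge(1, 3, 10)
#   mst = graph.kruskal()   # assumed method name; returns the 2-edge MST
#   print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}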
330
1
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self , __UpperCAmelCase = 128 , __UpperCAmelCase = 256 , __UpperCAmelCase = 2_000.0 , __UpperCAmelCase = 768 , __UpperCAmelCase = 12 , __UpperCAmelCase = 12 , __UpperCAmelCase = 64 , __UpperCAmelCase = 2048 , __UpperCAmelCase = 0.1 , ): '''simple docstring''' super().__init__() __lowerCamelCase = nn.Sequential( nn.Linear(__UpperCAmelCase , d_model * 4 , bias=__UpperCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__UpperCAmelCase ) , nn.SiLU() , ) __lowerCamelCase = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = False __lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(p=__UpperCAmelCase ) __lowerCamelCase = nn.ModuleList() for lyr_num in range(__UpperCAmelCase ): # FiLM conditional T5 decoder __lowerCamelCase = DecoderLayer(d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase ) self.decoders.append(__UpperCAmelCase ) __lowerCamelCase = TaLayerNorm(__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(p=__UpperCAmelCase ) __lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __lowerCamelCase = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __lowerCamelCase = self.conditioning_emb(__UpperCAmelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __lowerCamelCase = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __lowerCamelCase = torch.broadcast_to( torch.arange(__UpperCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __lowerCamelCase = self.position_encoding(__UpperCAmelCase ) __lowerCamelCase = self.continuous_inputs_projection(__UpperCAmelCase ) inputs += position_encodings __lowerCamelCase = self.dropout(__UpperCAmelCase ) # decoder: No padding present. __lowerCamelCase = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__lowerCamelCase = [(x, self.encoder_decoder_mask(__UpperCAmelCase , __UpperCAmelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings __lowerCamelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __lowerCamelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __lowerCamelCase = lyr( __UpperCAmelCase , conditioning_emb=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )[0] __lowerCamelCase = self.decoder_norm(__UpperCAmelCase ) __lowerCamelCase = self.post_dropout(__UpperCAmelCase ) __lowerCamelCase = self.spec_out(__UpperCAmelCase ) return spec_out class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1E-6 ): '''simple docstring''' super().__init__() __lowerCamelCase = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , dropout_rate=__UpperCAmelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__UpperCAmelCase , d_kv=__UpperCAmelCase , num_heads=__UpperCAmelCase , dropout_rate=__UpperCAmelCase , layer_norm_epsilon=__UpperCAmelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase , layer_norm_epsilon=__UpperCAmelCase ) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = self.layer[0]( __UpperCAmelCase , conditioning_emb=__UpperCAmelCase , attention_mask=__UpperCAmelCase , ) if encoder_hidden_states is not None: __lowerCamelCase = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to( encoder_hidden_states.dtype ) __lowerCamelCase = self.layer[1]( __UpperCAmelCase , key_value_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , ) # Apply Film Conditional Feed Forward layer __lowerCamelCase = self.layer[-1](__UpperCAmelCase , __UpperCAmelCase ) return (hidden_states,) class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = TaLayerNorm(__UpperCAmelCase ) __lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCAmelCase ) __lowerCamelCase = Attention(query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , out_bias=__UpperCAmelCase , scale_qk=__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , ): '''simple docstring''' # pre_self_attention_layer_norm __lowerCamelCase = self.layer_norm(__UpperCAmelCase ) if conditioning_emb is not None: __lowerCamelCase = self.FiLMLayer(__UpperCAmelCase , __UpperCAmelCase ) # Self-attention block __lowerCamelCase = self.attention(__UpperCAmelCase ) __lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase ) return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = Attention(query_dim=__UpperCAmelCase , 
heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , out_bias=__UpperCAmelCase , scale_qk=__UpperCAmelCase ) __lowerCamelCase = TaLayerNorm(__UpperCAmelCase , eps=__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = self.layer_norm(__UpperCAmelCase ) __lowerCamelCase = self.attention( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , ) __lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase ) return layer_output class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = TaDenseGatedActDense(d_model=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase ) __lowerCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=__UpperCAmelCase ) __lowerCamelCase = TaLayerNorm(__UpperCAmelCase , eps=__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = self.layer_norm(__UpperCAmelCase ) if conditioning_emb is not None: __lowerCamelCase = self.film(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = self.DenseReluDense(__UpperCAmelCase ) __lowerCamelCase = hidden_states + self.dropout(__UpperCAmelCase ) return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) __lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) __lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) __lowerCamelCase = nn.Dropout(__UpperCAmelCase ) __lowerCamelCase = NewGELUActivation() def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.act(self.wi_a(__UpperCAmelCase ) ) __lowerCamelCase = self.wi_a(__UpperCAmelCase ) __lowerCamelCase = hidden_gelu * hidden_linear __lowerCamelCase = self.dropout(__UpperCAmelCase ) __lowerCamelCase = self.wo(__UpperCAmelCase ) return hidden_states class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1E-6 ): '''simple docstring''' super().__init__() __lowerCamelCase = nn.Parameter(torch.ones(__UpperCAmelCase ) ) __lowerCamelCase = eps def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
        # Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        __lowerCamelCase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__UpperCAmelCase )
        __lowerCamelCase = hidden_states * torch.rsqrt(variance + self.variance_epsilon )

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            __lowerCamelCase = hidden_states.to(self.weight.dtype )

        return self.weight * hidden_states


class __lowerCAmelCase ( nn.Module ):
    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__UpperCAmelCase , 3.0 )) ))


class __lowerCAmelCase ( nn.Module ):
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        super().__init__()
        __lowerCamelCase = nn.Linear(__UpperCAmelCase , out_features * 2 , bias=__UpperCAmelCase )

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = self.scale_bias(__UpperCAmelCase )
        __lowerCamelCase ,__lowerCamelCase = torch.chunk(__UpperCAmelCase , 2 , -1 )
        __lowerCamelCase = x * (1 + scale) + shift
        return x
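# The FiLM layer above reduces to x * (1 + scale) + shift, with scale and
# shift regressed from the conditioning embedding; a tiny standalone check of
# that modulation (all dimensions below are made up for the demo):
import torch
import torch.nn as nn

cond = torch.randn(2, 1, 32)               # conditioning embedding
film = nn.Linear(32, 2 * 16, bias=False)   # produces scale and shift for d=16
scale, shift = torch.chunk(film(cond), 2, dim=-1)
x = torch.randn(2, 8, 16)
out = x * (1 + scale) + shift              # broadcasts over the sequence axis
print(out.shape)  # torch.Size([2, 8, 16])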
330
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
1
import math


def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
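# Jump search probes every floor(sqrt(n))-th element of a sorted list and then
# scans linearly inside one block, giving O(sqrt(n)) comparisons; a quick
# sanity check of the function above:
#
#   assert jump_search([0, 1, 3, 5, 8, 13, 21], 8) == 4
#   assert jump_search([0, 1, 3, 5, 8, 13, 21], 7) == -1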
330
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
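# The _LazyModule pattern above defers heavy framework imports until attribute
# access; a stripped-down sketch of the mechanism (this is an illustration,
# not the transformers class itself):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute -> submodule that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the defining submodule only on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)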
330
1
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
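# A worked example for the evaluator above: "5 6 9 * +" pushes 5, 6 and 9,
# multiplies 6 * 9 = 54, then adds 5, i.e. 5 + 6 * 9 = 59.
#
#   assert solve("5 6 9 * +".split(" ")) == 59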
330
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
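# One more behaviour worth noting for the helper above: non-letter leading
# characters pass through unchanged, since the lookup falls back to the
# original character.
#
#   assert capitalize("123 hello world") == "123 hello world"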
330
1
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) __lowerCamelCase = '''The dog is cute and lives in the garden house''' __lowerCamelCase = jnp.array([tokenizer.encode(__UpperCAmelCase )] ) __lowerCamelCase = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim __lowerCamelCase = jnp.array( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) __lowerCamelCase = model(__UpperCAmelCase )['''last_hidden_state'''] self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , __UpperCAmelCase , atol=1E-3 ) )
330
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
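# The -100 substitution in the label preprocessing above is load-bearing:
# PyTorch's CrossEntropyLoss ignores targets equal to -100 by default, so
# padded label positions contribute no gradient. Equivalently:
#
#   import torch
#   loss_fct = torch.nn.CrossEntropyLoss()  # ignore_index defaults to -100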
330
1
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """glpn""" def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[8, 4, 2, 1] , __UpperCAmelCase=[32, 64, 160, 256] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[1, 2, 5, 8] , __UpperCAmelCase=[4, 4, 4, 4] , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=64 , __UpperCAmelCase=10 , __UpperCAmelCase=-1 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = num_channels __lowerCamelCase = num_encoder_blocks __lowerCamelCase = depths __lowerCamelCase = sr_ratios __lowerCamelCase = hidden_sizes __lowerCamelCase = patch_sizes __lowerCamelCase = strides __lowerCamelCase = mlp_ratios __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = initializer_range __lowerCamelCase = drop_path_rate __lowerCamelCase = layer_norm_eps __lowerCamelCase = decoder_hidden_size __lowerCamelCase = max_depth __lowerCamelCase = head_in_index
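# A minimal sketch of instantiating the configuration above; GLPNConfig is the
# upstream class this definition corresponds to, and overriding a couple of
# fields is enough to exercise the defaults:
#
#   from transformers import GLPNConfig
#
#   config = GLPNConfig(num_channels=3, decoder_hidden_size=64)
#   print(config.model_type)  # "glpn"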
330
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES a_ = """tiny-wmt19-en-ru""" # Build # borrowed from a test a_ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] a_ = dict(zip(vocab, range(len(vocab)))) a_ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: a_ = Path(tmpdirname) a_ = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] a_ = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] a_ = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) a_ = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) a_ = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) a_ = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test a_ = tokenizer(["""Making tiny model"""], return_tensors="""pt""") a_ = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
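# After the script above runs, the tiny checkpoint loads like any other FSMT
# model; a minimal sketch using the local output folder (nothing is uploaded):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tok = FSMTTokenizer.from_pretrained("tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-ru")
#   out = model.generate(**tok(["Making tiny model"], return_tensors="pt"))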
330
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
1
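The expected shard lists in the tests above all follow one rule: with `even_batches=True` and `drop_last=False`, batches are dealt out round-robin across processes, and any shard that would come up short is topped up by wrapping around to the start of the dataset. A standalone sketch of that rule (a simplified stand-in for `BatchSamplerShard`, not the real implementation):

```py
import itertools


def shard_indices(indices, batch_size, num_shards, shard_index):
    # Deal full batches to each shard round-robin; if the data runs out,
    # keep cycling from index 0 so every shard ends up with equal,
    # full-size batches (the even_batches=True, drop_last=False case).
    total = batch_size * num_shards
    num_rounds = -(-len(indices) // total)  # ceil(len / total)
    padded = list(itertools.islice(itertools.cycle(indices), num_rounds * total))
    start = shard_index * batch_size
    return [
        padded[r * total + start : r * total + start + batch_size]
        for r in range(num_rounds)
    ]


# Reproduces the [21, 0, 1] tail asserted above for 22 samples on shard 1:
print(shard_indices(list(range(22)), batch_size=3, num_shards=2, shard_index=1))
```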
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a_ = logging.get_logger(__name__) # pylint: disable=invalid-name a_ = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def a__ ( _UpperCamelCase : int ,_UpperCamelCase : str ,_UpperCamelCase : Dict=8 ): __lowerCamelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __lowerCamelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' super().__init__() self.register_modules( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , ) __lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if latents is None: __lowerCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) __lowerCamelCase = latents.to(__UpperCAmelCase ) __lowerCamelCase = latents * scheduler.init_noise_sigma return latents def lowerCamelCase ( self , __UpperCAmelCase=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __lowerCamelCase = torch.device(F"""cuda:{gpu_id}""" ) __lowerCamelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) __lowerCamelCase = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=__UpperCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCamelCase = None for cpu_offloaded_model in [self.unet, self.movq]: __lowerCamelCase ,__lowerCamelCase = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase ) # We'll offload the last model manually. 
__lowerCamelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase ( self ): '''simple docstring''' if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 512 , __UpperCAmelCase = 512 , __UpperCAmelCase = 100 , __UpperCAmelCase = 4.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ): '''simple docstring''' __lowerCamelCase = self._execution_device __lowerCamelCase = guidance_scale > 1.0 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=0 ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=0 ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=0 ) __lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: __lowerCamelCase = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) __lowerCamelCase = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) __lowerCamelCase = hint.repeat_interleave(__UpperCAmelCase , dim=0 ) __lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase ) __lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase ) self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase ) __lowerCamelCase = self.scheduler.timesteps __lowerCamelCase = self.movq.config.latent_channels __lowerCamelCase ,__lowerCamelCase = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor ) # create initial latent __lowerCamelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance __lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCamelCase = {'''image_embeds''': image_embeds, '''hint''': hint} __lowerCamelCase = self.unet( sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] if do_classifier_free_guidance: __lowerCamelCase ,__lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCamelCase ,__lowerCamelCase = noise_pred.chunk(2 ) __lowerCamelCase ,__lowerCamelCase = variance_pred.chunk(2 ) __lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCamelCase ,__lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t 
-> x_t-1 __lowerCamelCase = self.scheduler.step( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0] # post-processing __lowerCamelCase = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: __lowerCamelCase = image * 0.5 + 0.5 __lowerCamelCase = image.clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase )
330
def perfect_cube(n: int) -> bool:
    # n ** (1 / 3) is a float; round it before cubing, otherwise e.g.
    # 64 ** (1 / 3) evaluates to 3.9999999999999996 and the check fails.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
330
1
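`downscale_height_and_width` in the pipeline above rounds a requested pixel size up to the next multiple of `scale_factor**2` and then divides once by `scale_factor`, which yields dimensions compatible with the MoVQ latent grid. A quick worked check, with the function body copied from the pipeline and the default factor of 8:

```py
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# ceil(512 / 64) * 8 = 64 and ceil(520 / 64) * 8 = 72
print(downscale_height_and_width(512, 520))  # (64, 72)
```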
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a__ ( ): __lowerCamelCase = argparse.ArgumentParser() parser.add_argument( '''-m''' ,'''--pretrained_model_name_or_path''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,required=_UpperCamelCase ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,) parser.add_argument( '''-c''' ,'''--caption''' ,type=_UpperCamelCase ,default='''robotic cat with wings''' ,help='''Text used to generate images.''' ,) parser.add_argument( '''-n''' ,'''--images_num''' ,type=_UpperCamelCase ,default=4 ,help='''How much images to generate.''' ,) parser.add_argument( '''-s''' ,'''--seed''' ,type=_UpperCamelCase ,default=42 ,help='''Seed for random process.''' ,) parser.add_argument( '''-ci''' ,'''--cuda_id''' ,type=_UpperCamelCase ,default=0 ,help='''cuda_id.''' ,) __lowerCamelCase = parser.parse_args() return args def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Union[str, Any] ): if not len(_UpperCamelCase ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) __lowerCamelCase ,__lowerCamelCase = imgs[0].size __lowerCamelCase = Image.new('''RGB''' ,size=(cols * w, rows * h) ) __lowerCamelCase ,__lowerCamelCase = grid.size for i, img in enumerate(_UpperCamelCase ): grid.paste(_UpperCamelCase ,box=(i % cols * w, i // cols * h) ) return grid def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any]="robotic cat with wings" ,_UpperCamelCase : Any=7.5 ,_UpperCamelCase : Dict=50 ,_UpperCamelCase : List[str]=1 ,_UpperCamelCase : List[str]=42 ,): __lowerCamelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCamelCase ) __lowerCamelCase = pipeline( _UpperCamelCase ,guidance_scale=_UpperCamelCase ,num_inference_steps=_UpperCamelCase ,generator=_UpperCamelCase ,num_images_per_prompt=_UpperCamelCase ,).images __lowerCamelCase = int(math.sqrt(_UpperCamelCase ) ) __lowerCamelCase = image_grid(_UpperCamelCase ,rows=_rows ,cols=num_images_per_prompt // _rows ) return grid, images a_ = parse_args() # Load models and create wrapper for stable diffusion a_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") a_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") a_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") a_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") a_ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) a_ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): a_ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: a_ = unet.to(torch.device("""cuda""", args.cuda_id)) a_ = pipeline.to(unet.device) a_ , a_ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) a_ = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split())) 
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
330
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
1
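The `@find_executable_batch_size` decorator above retries `inner_training_loop` with a smaller batch size whenever training hits an out-of-memory error. A minimal sketch of the mechanism, assuming a simplified stand-in for the accelerate utility (the real one also frees cached GPU memory between attempts and recognizes several error types):

```py
import functools


def find_executable_batch_size(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    # The wrapped function receives the batch size as its
                    # first argument, exactly like inner_training_loop above.
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err).lower():
                        raise
                    batch_size //= 2  # halve and retry after an OOM

            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator
```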
values = {
    0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
330
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
1
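A quick sanity check of the converter above against Python's built-in `hex`, which uses the same `0x` prefix and lowercase digits:

```py
assert decimal_to_hexadecimal(5) == hex(5) == "0x5"
assert decimal_to_hexadecimal(3_509) == hex(3_509) == "0xdb5"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"
```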
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 256} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
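The image processor above first resizes so the image's shortest edge is 256 pixels, then center-crops to 224x224 before rescaling and normalizing. A sketch of the shortest-edge resize step, assuming a simplified stand-in for `get_resize_output_image_size` (the real helper also handles `max_size` and other `size` dict formats):

```py
def shortest_edge_size(height, width, shortest_edge=256):
    # Scale so min(height, width) == shortest_edge, keeping the aspect ratio.
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * shortest_edge / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


print(shortest_edge_size(300, 400))  # (256, 341)
print(shortest_edge_size(400, 300))  # (341, 256)
```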
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
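# Hedged inference sketch mirroring the integration test above; the random
# frames stand in for a real clip, and the checkpoint name and 8-frame input
# come from the test itself.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 400): Kinetics-400 class scores
print(logits.argmax(-1).item())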
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths in `nums` can form a valid simple polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
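# Usage sketch: a polygon closes only when its longest side is strictly
# shorter than the sum of all the others.
assert check_polygon([6, 10, 5])         # 10 < 6 + 5
assert not check_polygon([3, 7, 13, 2])  # 13 >= 3 + 7 + 2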
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends with the number itself (5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
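# Quick checks for the automorphic-number test above (a small usage sketch):
assert is_automorphic_number(25)      # 25 * 25 = 625 ends in 25
assert is_automorphic_number(76)      # 76 * 76 = 5776 ends in 76
assert not is_automorphic_number(7)   # 7 * 7 = 49 does not end in 7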
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def a__ ( ): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __lowerCamelCase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching ,'''os.path.join''' ,_UpperCamelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os ,_PatchedModuleObj ) assert isinstance(_test_patching.os.path ,_PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path ,_PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def a__ ( ): assert _test_patching.open is open __lowerCamelCase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching ,'''open''' ,_UpperCamelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def a__ ( ): # pandas.read_csv is not present in _test_patching __lowerCamelCase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching ,'''pandas.read_csv''' ,_UpperCamelCase ): pass def a__ ( ): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __lowerCamelCase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching ,'''len''' ,_UpperCamelCase ) is None with patch_submodule(_test_patching ,'''len''' ,_UpperCamelCase ): assert _test_patching.len is mock assert _test_patching.len is len def a__ ( ): __lowerCamelCase = '''__test_patch_submodule_start_and_stop_mock__''' 
__lowerCamelCase = patch_submodule(_test_patching ,'''open''' ,_UpperCamelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def a__ ( ): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __lowerCamelCase = '''__test_patch_submodule_successive_join__''' __lowerCamelCase = '''__test_patch_submodule_successive_dirname__''' __lowerCamelCase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching ,'''os.path.join''' ,_UpperCamelCase ): with patch_submodule(_test_patching ,'''os.rename''' ,_UpperCamelCase ): with patch_submodule(_test_patching ,'''os.path.dirname''' ,_UpperCamelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching ,'''os.rename''' ,_UpperCamelCase ): with patch_submodule(_test_patching ,'''os.path.join''' ,_UpperCamelCase ): with patch_submodule(_test_patching ,'''os.path.dirname''' ,_UpperCamelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def a__ ( ): __lowerCamelCase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching ,'''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' ,_UpperCamelCase ): pass with patch_submodule(_test_patching ,'''os.__attribute_that_doesn_exist__''' ,_UpperCamelCase ): pass
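# A minimal stdlib sketch of the attribute-patching idea the tests above
# exercise; this toy context manager is illustrative, not the `datasets`
# implementation of `patch_submodule`.
from contextlib import contextmanager


@contextmanager
def patch_attribute(module, name, mock):
    original = getattr(module, name)
    setattr(module, name, mock)
    try:
        yield
    finally:
        setattr(module, name, original)  # restore on exit, even after errors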
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
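# A stripped-down sketch of the lazy-import pattern `_LazyModule` implements
# above: attribute access triggers the real submodule import, so the
# top-level `import` stays cheap. This toy version is illustrative only,
# not the transformers implementation.
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule: [symbol, ...]}

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")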
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
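# A hedged side check: decoding the hard-coded ids with the matching
# sentencepiece tokenizer should resemble the sentence in the comment above.
# "camembert-base" is an assumed compatible checkpoint, used only for
# illustration.
#
#   from transformers import CamembertTokenizer
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   print(tokenizer.decode([5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]))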
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
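# Hedged usage sketch of the 4-bit loading path the tests above exercise;
# assumes a CUDA GPU with the `bitsandbytes` and `accelerate` packages
# installed. The model name and prompt are taken from the test constants.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))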
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
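# Complexity note as a short usage sketch: this BST is unbalanced, so tree
# sort runs in O(n log n) on average but degrades to O(n^2) on sorted input.
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
assert tree_sort([]) == []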
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
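# Hedged usage sketch of the tiled VAE path implemented above; assumes a
# pretrained KL autoencoder checkpoint ("stabilityai/sd-vae-ft-mse" is used
# here only for illustration).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()  # encode/decode large images tile by tile to save VRAM

image = torch.randn(1, 3, 1024, 1024)  # larger than tile_sample_min_size
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample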
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
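# Hedged command-line sketch for the converter above; the script filename and
# paths are illustrative placeholders, while the flags come from the argparse
# setup in the file:
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir /path/to/pytorch_dump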
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=4 , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( 
config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxBertModelTester(self ) @slow def lowerCamelCase ( self ): '''simple docstring''' # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. __lowerCamelCase = FlaxBertModel.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCAmelCase )
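# --- Added usage sketch (not part of the original test file) ---
# Running the same checkpoint the slow test above exercises, outside the
# unittest harness. Assumes network access to the Hugging Face Hub; the
# input shape is arbitrary.
if __name__ == "__main__":
    import numpy as np
    from transformers import FlaxBertModel

    model = FlaxBertModel.from_pretrained("bert-base-cased")
    outputs = model(np.ones((1, 8), dtype="i4"))
    # last_hidden_state has shape (batch, seq_len, hidden) == (1, 8, 768) for bert-base
    print(outputs.last_hidden_state.shape)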
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
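# --- Added usage sketch (not part of the original module) ---
# Driving the pipeline end to end. The classes defined above correspond to
# transformers' Conversation and ConversationalPipeline; the checkpoint name
# is an assumption for illustration (any conversational checkpoint works).
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("What is the best way to learn Python?")
    conversation = chatbot(conversation)
    # past_user_inputs and generated_responses now hold one completed turn each
    print(conversation)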
def a__ ( ): return [list(range(10_00 - i ,-10_00 - i ,-1 ) ) for i in range(10_00 )] a_ = generate_large_matrix() a_ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def a__ ( _UpperCamelCase : list[list[int]] ): assert all(row == sorted(_UpperCamelCase ,reverse=_UpperCamelCase ) for row in grid ) assert all(list(_UpperCamelCase ) == sorted(_UpperCamelCase ,reverse=_UpperCamelCase ) for col in zip(*_UpperCamelCase ) ) def a__ ( _UpperCamelCase : list[int] ): __lowerCamelCase = 0 __lowerCamelCase = len(_UpperCamelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: __lowerCamelCase = (left + right) // 2 __lowerCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: __lowerCamelCase = mid + 1 else: __lowerCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(_UpperCamelCase ) def a__ ( _UpperCamelCase : list[list[int]] ): __lowerCamelCase = 0 __lowerCamelCase = len(grid[0] ) for i in range(len(_UpperCamelCase ) ): __lowerCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(_UpperCamelCase ) * len(grid[0] )) - total def a__ ( _UpperCamelCase : list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def a__ ( _UpperCamelCase : list[list[int]] ): __lowerCamelCase = 0 for row in grid: for i, number in enumerate(_UpperCamelCase ): if number < 0: total += len(_UpperCamelCase ) - i break return total def a__ ( ): from timeit import timeit print('''Running benchmarks''' ) __lowerCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): __lowerCamelCase = timeit(F"""{func}(grid=grid)""" ,setup=_UpperCamelCase ,number=5_00 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
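# --- Added usage sketch (not part of the original script) ---
# The benchmark setup string above refers to the counting functions by name,
# so this sketch assumes the ``a__`` definitions correspond (in order) to
# generate_large_matrix, the grid validator, find_negative_index,
# count_negatives_binary_search, count_negatives_brute_force and
# count_negatives_brute_force_with_break. All three counters must agree on
# any grid sorted in decreasing order along both rows and columns.
#
# sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
# count_negatives_binary_search(sample)           # -> 8
# count_negatives_brute_force(sample)             # -> 8
# count_negatives_brute_force_with_break(sample)  # -> 8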
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
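# --- Added usage note (not part of the original script) ---
# A typical invocation, assuming a TF Pegasus checkpoint fine-tuned on aeslc
# was downloaded to ./ckpt/aeslc (the dataset name is inferred from the
# checkpoint's parent directory, as the argparse block above shows). The
# script filename is a placeholder for wherever this module is saved.
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc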
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a_ = { """configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""], """tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """AdaptiveEmbedding""", """TransfoXLForSequenceClassification""", """TransfoXLLMHeadModel""", """TransfoXLModel""", """TransfoXLPreTrainedModel""", """load_tf_weights_in_transfo_xl""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ """TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAdaptiveEmbedding""", """TFTransfoXLForSequenceClassification""", """TFTransfoXLLMHeadModel""", """TFTransfoXLMainLayer""", """TFTransfoXLModel""", """TFTransfoXLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
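# --- Added usage note (not part of the original script) ---
# Converting a fairseq UniSpeechSAT checkpoint with the argparse flags defined
# above; all paths and the script filename are placeholders. Pass
# --not_finetuned for a pretraining checkpoint, and omit it when the source
# is a fine-tuned CTC model.
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-converted \
#       --not_finetuned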
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) a_ = logging.getLogger(__name__) @dataclass(frozen=lowerCAmelCase__ ) class __lowerCAmelCase : lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None @dataclass(frozen=lowerCAmelCase__ ) class __lowerCAmelCase : lowerCAmelCase__ = 42 lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if is_torch_available(): import torch from torch.utils.data import Dataset class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=False , __UpperCAmelCase = False , ): '''simple docstring''' __lowerCamelCase = hans_processors[task]() __lowerCamelCase = os.path.join( __UpperCAmelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__UpperCAmelCase ) , __UpperCAmelCase , ) , ) __lowerCamelCase = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowerCamelCase ,__lowerCamelCase = label_list[2], label_list[1] __lowerCamelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowerCamelCase = cached_features_file + '''.lock''' with FileLock(__UpperCAmelCase ): if os.path.exists(__UpperCAmelCase ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) __lowerCamelCase = torch.load(__UpperCAmelCase ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) __lowerCamelCase = ( processor.get_dev_examples(__UpperCAmelCase ) if evaluate else processor.get_train_examples(__UpperCAmelCase ) ) logger.info('''Training examples: %s''' , len(__UpperCAmelCase ) ) __lowerCamelCase = hans_convert_examples_to_features(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) logger.info('''Saving features into cached file %s''' , __UpperCAmelCase ) torch.save(self.features , __UpperCAmelCase ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' return self.features[i] def lowerCamelCase ( self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : lowerCAmelCase__ = 42 def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 128 , __UpperCAmelCase=False , __UpperCAmelCase = False , ): '''simple docstring''' __lowerCamelCase = hans_processors[task]() __lowerCamelCase = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowerCamelCase ,__lowerCamelCase = label_list[2], label_list[1] __lowerCamelCase = label_list __lowerCamelCase = processor.get_dev_examples(__UpperCAmelCase ) if evaluate else 
processor.get_train_examples(__UpperCAmelCase ) __lowerCamelCase = hans_convert_examples_to_features(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(__UpperCAmelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __lowerCamelCase = tf.data.Dataset.from_generator( __UpperCAmelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def lowerCamelCase ( self ): '''simple docstring''' return self.dataset def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' return self.features[i] def lowerCamelCase ( self ): '''simple docstring''' return self.label_list class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(__UpperCAmelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(__UpperCAmelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def lowerCamelCase ( self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = [] for i, line in enumerate(__UpperCAmelCase ): if i == 0: continue __lowerCamelCase = '''%s-%s''' % (set_type, line[0]) __lowerCamelCase = line[5] __lowerCamelCase = line[6] __lowerCamelCase = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __lowerCamelCase = line[0] examples.append(InputExample(guid=__UpperCAmelCase , text_a=__UpperCAmelCase , text_b=__UpperCAmelCase , label=__UpperCAmelCase , pairID=__UpperCAmelCase ) ) return examples def a__ ( _UpperCamelCase : List[InputExample] ,_UpperCamelCase : List[str] ,_UpperCamelCase : int ,_UpperCamelCase : PreTrainedTokenizer ,): __lowerCamelCase = {label: i for i, label in enumerate(_UpperCamelCase )} __lowerCamelCase = [] for ex_index, example in tqdm.tqdm(enumerate(_UpperCamelCase ) ,desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: logger.info('''Writing example %d''' % (ex_index) ) __lowerCamelCase = tokenizer( example.text_a ,example.text_b ,add_special_tokens=_UpperCamelCase ,max_length=_UpperCamelCase ,padding='''max_length''' ,truncation=_UpperCamelCase ,return_overflowing_tokens=_UpperCamelCase ,) __lowerCamelCase = label_map[example.label] if example.label in label_map else 0 __lowerCamelCase = int(example.pairID ) features.append(InputFeatures(**_UpperCamelCase ,label=_UpperCamelCase ,pairID=_UpperCamelCase ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(F"""guid: {example}""" ) logger.info(F"""features: {features[i]}""" ) return features a_ = { """hans""": 3, } a_ = { """hans""": HansProcessor, }
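# --- Added usage sketch (not part of the original module) ---
# Featurizing a few HANS examples. This assumes the processor class above is
# HansProcessor (as the registry at the end of the file indicates) and that
# the final ``a__`` is hans_convert_examples_to_features, the name the dataset
# classes call; /path/to/hans is a placeholder for a directory holding
# heuristics_train_set.txt and heuristics_evaluation_set.txt.
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# processor = HansProcessor()
# examples = processor.get_dev_examples("/path/to/hans")
# features = hans_convert_examples_to_features(examples[:2], processor.get_labels(), 128, tokenizer)
# features[0]  # InputFeatures with input_ids / attention_mask / token_type_ids / label / pairID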
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
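# --- Added usage sketch (not part of the original module) ---
# The public entry point for this pipeline. The checkpoint and image URL are
# assumptions for illustration; any DPT/GLPN-style depth-estimation checkpoint
# on the Hub should work, and the output keys are assumed to match the
# dictionary built in the postprocess step above.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # "depth" is the PIL visualization; "predicted_depth" is the raw tensor.
    result["depth"].save("depth.png")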
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
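# --- Added usage sketch (not part of the original module) ---
# The class above is transformers' CLIPImageProcessor (its name is obfuscated
# here); a minimal preprocessing call on a synthetic RGB image.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor()  # defaults: shortest_edge=224, center crop 224x224
    image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)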
class __lowerCAmelCase : # Public class to implement a graph def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = row __lowerCamelCase = col __lowerCamelCase = graph def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # Checking all 8 elements surrounding nth element __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowerCamelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase ) def lowerCamelCase ( self ): # And finally, count all islands. '''simple docstring''' __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowerCamelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) count += 1 return count
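# --- Added usage sketch (not part of the original file) ---
# Counting islands (8-directional connectivity) in a small binary grid.
# ``Graph`` is a hypothetical binding for the obfuscated class above, and
# ``count_islands`` a hypothetical name for its final ``lowerCamelCase``
# method (the one that runs the flood fill over every unvisited land cell).
#
# graph = [[1, 1, 0, 0, 0],
#          [0, 1, 0, 0, 1],
#          [1, 0, 0, 1, 1],
#          [0, 0, 0, 0, 0],
#          [1, 0, 1, 0, 1]]
# g = Graph(5, 5, graph)
# g.count_islands()  # -> 5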
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if it's not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda x : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
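# --- Added usage sketch (not part of the original module) ---
# Building a small weighted graph and extracting its minimum spanning tree via
# Kruskal's algorithm. The classes above internally refer to themselves as
# DisjointSetTree and GraphUndirectedWeighted, which this sketch assumes;
# ``kruskal`` is a hypothetical binding for the final MST-building method.
#
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1, 2, 1)
# g.add_edge(2, 3, 2)
# g.add_edge(1, 3, 10)
# mst = g.kruskal()
# mst.connections  # edge (1, 3) is dropped: 1-2 and 2-3 already span all nodes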
import numpy as np def a__ ( _UpperCamelCase : np.ndarray ,_UpperCamelCase : np.ndarray ,_UpperCamelCase : float = 1e-12 ,_UpperCamelCase : int = 1_00 ,): assert np.shape(_UpperCamelCase )[0] == np.shape(_UpperCamelCase )[1] # Ensure proper dimensionality. assert np.shape(_UpperCamelCase )[0] == np.shape(_UpperCamelCase )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(_UpperCamelCase ) == np.iscomplexobj(_UpperCamelCase ) __lowerCamelCase = np.iscomplexobj(_UpperCamelCase ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(_UpperCamelCase ,input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. __lowerCamelCase = False __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 1e12 while not convergence: # Multiple matrix by the vector. __lowerCamelCase = np.dot(_UpperCamelCase ,_UpperCamelCase ) # Normalize the resulting output vector. __lowerCamelCase = w / np.linalg.norm(_UpperCamelCase ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) __lowerCamelCase = vector.conj().T if is_complex else vector.T __lowerCamelCase = np.dot(_UpperCamelCase ,np.dot(_UpperCamelCase ,_UpperCamelCase ) ) # Check convergence. __lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: __lowerCamelCase = True __lowerCamelCase = lambda_ if is_complex: __lowerCamelCase = np.real(lambda_ ) return lambda_, vector def a__ ( ): __lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) __lowerCamelCase = np.array([41, 4, 20] ) __lowerCamelCase = real_input_matrix.astype(np.complexaaa ) __lowerCamelCase = np.triu(1j * complex_input_matrix ,1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T __lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": __lowerCamelCase = real_input_matrix __lowerCamelCase = real_vector elif problem_type == "complex": __lowerCamelCase = complex_input_matrix __lowerCamelCase = complex_vector # Our implementation. __lowerCamelCase ,__lowerCamelCase = power_iteration(_UpperCamelCase ,_UpperCamelCase ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). __lowerCamelCase ,__lowerCamelCase = np.linalg.eigh(_UpperCamelCase ) # Last eigenvalue is the maximum one. __lowerCamelCase = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. __lowerCamelCase = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1e-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(_UpperCamelCase ) - np.abs(_UpperCamelCase ) ) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
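# --- Added usage sketch (not part of the original module) ---
# Power iteration on the module's own symmetric test matrix; the result should
# match numpy's largest eigenvalue up to the stated tolerance. Assumes the
# first ``a__`` definition above corresponds to power_iteration, the name the
# test function already uses.
#
# import numpy as np
# matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
# eigen_value, eigen_vector = power_iteration(matrix, np.array([41, 4, 20]))
# np.isclose(eigen_value, np.linalg.eigh(matrix)[0][-1])  # -> True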
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
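The slow integration tests above all follow one spot-check pattern: feed the pretrained checkpoint a deterministic torch.arange input, then compare a small corner of the output logits against hard-coded reference values. A minimal sketch of that pattern; check_logits_corner is a hypothetical helper name, not part of the test suite:

import torch


def check_logits_corner(output: torch.Tensor, expected: torch.Tensor, atol: float = 1e-4) -> bool:
    # Compare only the top-left 3x3 slice of the first batch element;
    # asserting on the full (1, seq_len, vocab_size) tensor would be brittle.
    return torch.allclose(output[:, :3, :3], expected, atol=atol)


# Deterministic input as used in the tests: token ids 0..255, batch size 1.
input_ids = torch.arange(256).unsqueeze(0)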
330
1
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of an object: KE = 0.5 * m * |v|^2.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(0, 10)
    0.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
330
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
1
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) a_ = pytest.mark.integration @pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] ) def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): inspect_dataset(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' ,['''accuracy'''] ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Optional[Any] ): inspect_metric(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' ,[ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] ,) def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Tuple ,_UpperCamelCase : Any ): __lowerCamelCase = get_dataset_config_info(_UpperCamelCase ,config_name=_UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' ,[ ('''paws''', None, ValueError), ] ,) def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Any ,_UpperCamelCase : Optional[int] ): with pytest.raises(_UpperCamelCase ): get_dataset_config_info(_UpperCamelCase ,config_name=_UpperCamelCase ) @pytest.mark.parametrize( '''path, expected''' ,[ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] ,) def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = get_dataset_config_names(_UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' ,[ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] ,) def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : Optional[Any] ): __lowerCamelCase = get_dataset_infos(_UpperCamelCase ) assert list(infos.keys() ) == expected_configs __lowerCamelCase = expected_configs[0] assert expected_config in infos __lowerCamelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' ,[ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] ,) def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : int ,_UpperCamelCase : List[str] ): __lowerCamelCase = 
get_dataset_infos(_UpperCamelCase ) assert expected_config in infos __lowerCamelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' ,[ ('''paws''', None, ValueError), ] ,) def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : List[Any] ,_UpperCamelCase : str ): with pytest.raises(_UpperCamelCase ): get_dataset_split_names(_UpperCamelCase ,config_name=_UpperCamelCase )
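A minimal usage sketch of the inspection helpers exercised above. It requires network access to the Hugging Face Hub; the expected values are taken from the parametrized test cases for the paws dataset:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("paws")
# ['labeled_final', 'labeled_swap', 'unlabeled_final']
splits = get_dataset_split_names("paws", config_name="labeled_final")
# ['train', 'test', 'validation']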
330
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
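For ASCII input the same result can be had without building a translation table; note that str.upper() would also uppercase non-ASCII letters, which the table-based version deliberately leaves untouched. A sketch:

def capitalize_simple(sentence: str) -> str:
    # sentence[:1] avoids an IndexError on the empty string.
    return sentence[:1].upper() + sentence[1:]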
330
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
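The label-masking list comprehension in _map_to_encoder_decoder_inputs above is the key preprocessing step: pad positions are replaced with -100 so the cross-entropy loss ignores them. An isolated sketch of that step:

def mask_pad_labels(labels: list[int], pad_token_id: int) -> list[int]:
    # -100 is the default ignore_index of torch.nn.CrossEntropyLoss.
    return [-100 if token == pad_token_id else token for token in labels]


assert mask_pad_labels([101, 7592, 0, 0], pad_token_id=0) == [101, 7592, -100, -100]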
330
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
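A small usage sketch for the configuration class above; the overridden sizes are arbitrary values chosen only for illustration:

config = LukeConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.model_type)         # "luke"
print(config.entity_vocab_size)  # 500000 (the default)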
330
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
1
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients in ascending order, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
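Both evaluators compute the same value; Horner's rule rewrites c0 + c1*x + c2*x^2 + ... as c0 + x*(c1 + x*(c2 + ...)), replacing explicit powers of x with one multiply-add per coefficient. A quick check with the example polynomial (math.isclose guards against floating-point rounding):

import math

p = (0.0, 0.0, 5.0, 9.3, 7.0)  # 5x^2 + 9.3x^3 + 7x^4
assert math.isclose(evaluate_poly(p, 10.0), 79_800.0)
assert math.isclose(horner(p, 10.0), evaluate_poly(p, 10.0))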
330
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
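A minimal resume-from-checkpoint sketch of skip_first_batches, mirroring the assertions in the tests above:

from torch.utils.data import DataLoader

from accelerate.data_loader import skip_first_batches

dataloader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(dataloader, num_batches=2)
print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]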
330
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
330
def perfect_cube(n: int) -> bool:
    """
    Check whether an integer is a perfect cube.

    Rounding the floating-point cube root avoids precision errors such as
    27 ** (1 / 3) evaluating to 3.0000000000000004, which would make the
    naive equality check fail.
    """
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
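For very large integers even the rounded cube root can drift, so a float-free alternative may be preferable. A sketch using an integer binary search (this version also handles negatives, which the original does not):

def perfect_cube_exact(n: int) -> bool:
    # A negative number is a perfect cube iff its absolute value is.
    if n < 0:
        return perfect_cube_exact(-n)
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid**3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False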
330
1
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = """pixel_values""" lowerCAmelCase__ = False lowerCAmelCase__ = TimmBackboneConfig def __init__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , '''timm''' ) super().__init__(__UpperCAmelCase ) __lowerCamelCase = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__UpperCAmelCase , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) __lowerCamelCase = getattr(__UpperCAmelCase , '''use_pretrained_backbone''' , __UpperCAmelCase ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. __lowerCamelCase = config.out_indices if getattr(__UpperCAmelCase , '''out_indices''' , __UpperCAmelCase ) is not None else (-1,) __lowerCamelCase = timm.create_model( config.backbone , pretrained=__UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__UpperCAmelCase , **__UpperCAmelCase , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
__lowerCamelCase = self._backbone.return_layers __lowerCamelCase = {layer['''module''']: str(__UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__UpperCAmelCase ) @classmethod def lowerCamelCase ( cls , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig __lowerCamelCase = kwargs.pop('''config''' , TimmBackboneConfig() ) __lowerCamelCase = kwargs.pop('''use_timm_backbone''' , __UpperCAmelCase ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) __lowerCamelCase = kwargs.pop('''num_channels''' , config.num_channels ) __lowerCamelCase = kwargs.pop('''features_only''' , config.features_only ) __lowerCamelCase = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) __lowerCamelCase = kwargs.pop('''out_indices''' , config.out_indices ) __lowerCamelCase = TimmBackboneConfig( backbone=__UpperCAmelCase , num_channels=__UpperCAmelCase , features_only=__UpperCAmelCase , use_pretrained_backbone=__UpperCAmelCase , out_indices=__UpperCAmelCase , ) return super()._from_config(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' pass def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone __lowerCamelCase = self._all_layers __lowerCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase ) __lowerCamelCase = self._return_layers __lowerCamelCase = tuple(hidden_states[i] for i in self.out_indices ) else: __lowerCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase ) __lowerCamelCase = None __lowerCamelCase = tuple(__UpperCAmelCase ) __lowerCamelCase = tuple(__UpperCAmelCase ) if hidden_states is not None else None if not return_dict: __lowerCamelCase = (feature_maps,) if output_hidden_states: __lowerCamelCase = output + (hidden_states,) return output return BackboneOutput(feature_maps=__UpperCAmelCase , hidden_states=__UpperCAmelCase , attentions=__UpperCAmelCase )
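A usage sketch for the backbone wrapper above. Assumptions: timm is installed, "resnet18" is an arbitrary model choice, use_pretrained_backbone=False skips the weight download, and both classes are importable from the top-level transformers namespace as in recent releases:

import torch

from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(2, 3))
model = TimmBackbone(config)
feature_maps = model(torch.randn(1, 3, 224, 224)).feature_maps
print([tuple(f.shape) for f in feature_maps])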
330
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
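The decorator is the only new piece relative to the base nlp_example.py script. Stripped of the training details, the pattern looks like this (a sketch; the print is purely illustrative):

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    # On an out-of-memory error the decorator halves batch_size and calls
    # the function again; the first parameter must be the batch size and
    # must not be passed explicitly at call time.
    print(f"trying batch_size={batch_size}")


inner_training_loop()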
330
1
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1000 , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = coordinate_size __lowerCamelCase = shape_size __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCamelCase = text_seq_length __lowerCamelCase = (image_size // patch_size) ** 2 + 1 __lowerCamelCase = self.text_seq_length + self.image_seq_length def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __lowerCamelCase = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCamelCase = bbox[i, j, 3] __lowerCamelCase = bbox[i, j, 1] __lowerCamelCase = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCamelCase = bbox[i, j, 2] __lowerCamelCase = bbox[i, j, 0] __lowerCamelCase = 
tmp_coordinate __lowerCamelCase = tf.constant(__UpperCAmelCase ) __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCamelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TFLayoutLMvaModel(config=__UpperCAmelCase ) # text + image __lowerCamelCase = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , training=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowerCamelCase = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __lowerCamelCase = model({'''pixel_values''': pixel_values} , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = TFLayoutLMvaForSequenceClassification(config=__UpperCAmelCase ) __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = TFLayoutLMvaForTokenClassification(config=__UpperCAmelCase ) 
__lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = 2 __lowerCamelCase = TFLayoutLMvaForQuestionAnswering(config=__UpperCAmelCase ) __lowerCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ((__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase)) = config_and_inputs __lowerCamelCase = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return True def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = { k: tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__UpperCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TFLayoutLMvaModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): 
'''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) if getattr(__UpperCAmelCase , '''hf_compute_loss''' , __UpperCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label __lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __lowerCamelCase = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCAmelCase )[0] ] __lowerCamelCase = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __lowerCamelCase = prepared_for_class.pop('''input_ids''' ) __lowerCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __lowerCamelCase = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: __lowerCamelCase = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __lowerCamelCase = -100 __lowerCamelCase = tf.convert_to_tensor(__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) # Get keys that were added with the _prepare_for_class function __lowerCamelCase = prepared_for_class.keys() - inputs_dict.keys() __lowerCamelCase = inspect.signature(model.call ).parameters __lowerCamelCase = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __lowerCamelCase = {0: '''input_ids'''} for label_key in label_keys: __lowerCamelCase = signature_names.index(__UpperCAmelCase ) __lowerCamelCase = label_key __lowerCamelCase = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __lowerCamelCase = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __lowerCamelCase = prepared_for_class[value] __lowerCamelCase = tuple(__UpperCAmelCase ) # Send to model __lowerCamelCase = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = TFLayoutLMvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def a__ ( ): __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ).pixel_values __lowerCamelCase = tf.constant([[1, 2]] ) __lowerCamelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __lowerCamelCase = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits __lowerCamelCase = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase ) __lowerCamelCase = tf.constant( [[-0.0_529, 
0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
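The LayoutLMv3 tester above derives its sequence length from the text tokens plus one token per image patch plus a CLS token. Below is a minimal sketch of that bookkeeping, reading the tester's positional defaults as text_seq_length=7, image_size=4, patch_size=2; the helper name is illustrative and not part of transformers.

def count_layoutlmv3_tokens(text_seq_length: int, image_size: int, patch_size: int) -> int:
    # One token per image patch, plus one CLS token prepended to the visual tokens.
    image_seq_length = (image_size // patch_size) ** 2 + 1
    return text_seq_length + image_seq_length


# With the tester defaults: 7 text tokens + 4 patch tokens + 1 CLS token = 12
assert count_layoutlmv3_tokens(7, 4, 2) == 12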
330
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
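The module above ends by aliasing a platform-appropriate class (mangled to `a_` here; the unobfuscated filelock package exports it as `FileLock`): `WindowsFileLock` when `msvcrt` imports, `UnixFileLock` when `fcntl` does, and `SoftFileLock` otherwise. A minimal usage sketch, assuming the module is importable under its usual name `filelock`; the lock path is illustrative.

from filelock import FileLock  # assumption: the module above installed as "filelock"

lock = FileLock("resource.txt.lock", timeout=5)  # timeout is in seconds; -1 waits forever
with lock:  # __enter__ calls acquire(), __exit__ calls release()
    pass  # exclusive section
# The internal lock counter makes re-entry safe: the file is unlocked
# only when the outermost context exits.
with lock:
    with lock:
        pass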
330
1
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() a_ = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model a_ = { # fairseq: """wmt19-ru-en""": {"""length_penalty""": 1.1}, """wmt19-en-ru""": {"""length_penalty""": 1.15}, """wmt19-en-de""": {"""length_penalty""": 1.0}, """wmt19-de-en""": {"""length_penalty""": 1.1}, # allenai: """wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6}, """wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6}, """wmt16-en-de-12-1""": {"""length_penalty""": 0.8}, """wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6}, """wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6}, } # this remaps the different models to their organization names a_ = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a_ = """facebook""" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: a_ = """allenai""" def a__ ( _UpperCamelCase : Union[str, Any] ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} __lowerCamelCase = dict((re.sub(R'''@@$''' ,'''''' ,_UpperCamelCase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' ,'''</w>''' ,_UpperCamelCase ), v) for k, v in d.items() ) __lowerCamelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowerCamelCase = d[k] # restore return da def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[str] ): # prep assert os.path.exists(_UpperCamelCase ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models __lowerCamelCase = basename(_UpperCamelCase ) __lowerCamelCase = dirname(_UpperCamelCase ) __lowerCamelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel __lowerCamelCase = cls.hub_models() __lowerCamelCase = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} __lowerCamelCase = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F"""using checkpoint {checkpoint_file}""" ) __lowerCamelCase = hub_utils.from_pretrained( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,archive_map=_UpperCamelCase ,**_UpperCamelCase ) __lowerCamelCase = vars(chkpt['''args''']['''model'''] ) __lowerCamelCase = args['''source_lang'''] __lowerCamelCase = args['''target_lang'''] __lowerCamelCase = dirname(_UpperCamelCase ) __lowerCamelCase = basename(_UpperCamelCase ) # dicts __lowerCamelCase = os.path.join(_UpperCamelCase ,F"""dict.{src_lang}.txt""" ) __lowerCamelCase = os.path.join(_UpperCamelCase ,F"""dict.{tgt_lang}.txt""" ) __lowerCamelCase = Dictionary.load(_UpperCamelCase ) __lowerCamelCase = rewrite_dict_keys(src_dict.indices ) __lowerCamelCase = len(_UpperCamelCase ) __lowerCamelCase = os.path.join(_UpperCamelCase ,'''vocab-src.json''' ) print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(_UpperCamelCase ,ensure_ascii=_UpperCamelCase ,indent=_UpperCamelCase ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab __lowerCamelCase = True for k in src_vocab.keys(): if not k.islower(): __lowerCamelCase = False break __lowerCamelCase = Dictionary.load(_UpperCamelCase ) __lowerCamelCase = rewrite_dict_keys(tgt_dict.indices ) __lowerCamelCase = len(_UpperCamelCase ) __lowerCamelCase = os.path.join(_UpperCamelCase ,'''vocab-tgt.json''' ) print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(_UpperCamelCase ,ensure_ascii=_UpperCamelCase ,indent=_UpperCamelCase ) ) # merges_file (bpecodes) __lowerCamelCase = os.path.join(_UpperCamelCase ,VOCAB_FILES_NAMES['''merges_file'''] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" __lowerCamelCase = os.path.join(_UpperCamelCase ,_UpperCamelCase ) if os.path.exists(_UpperCamelCase ): break with open(_UpperCamelCase ,encoding='''utf-8''' ) as fin: __lowerCamelCase = fin.read() __lowerCamelCase = re.sub(R''' \d+$''' ,'''''' ,_UpperCamelCase ,0 ,re.M ) # remove frequency number print(F"""Generating {merges_file}""" ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as fout: fout.write(_UpperCamelCase ) # model config __lowerCamelCase = os.path.join(_UpperCamelCase ,'''config.json''' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}""" assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}""" __lowerCamelCase = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': args['''max_source_positions'''], '''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': 
args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with __lowerCamelCase = 5 __lowerCamelCase = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: __lowerCamelCase = best_score_hparams[model_dir]['''length_penalty'''] else: __lowerCamelCase = 1.0 print(F"""Generating {fsmt_model_config_file}""" ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(_UpperCamelCase ,ensure_ascii=_UpperCamelCase ,indent=_UpperCamelCase ) ) # tokenizer config __lowerCamelCase = os.path.join(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 10_24, '''do_lower_case''': do_lower_case, } print(F"""Generating {fsmt_tokenizer_config_file}""" ) with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(_UpperCamelCase ,ensure_ascii=_UpperCamelCase ,indent=_UpperCamelCase ) ) # model __lowerCamelCase = chkpt['''models'''][0] __lowerCamelCase = model.state_dict() # rename keys to start with 'model.' __lowerCamelCase = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys __lowerCamelCase = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = FSMTConfig.from_pretrained(_UpperCamelCase ) __lowerCamelCase = FSMTForConditionalGeneration(_UpperCamelCase ) # check that it loads ok model_new.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) # save __lowerCamelCase = os.path.join(_UpperCamelCase ,_UpperCamelCase ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(_UpperCamelCase ,_UpperCamelCase ) print('''Conversion is done!''' ) print('''\nLast step is to upload the files to s3''' ) print(F"""cd {data_root}""" ) print(F"""transformers-cli upload {model_dir}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fsmt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
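The first helper in the conversion script (mangled to `a__` above; `rewrite_dict_keys` in the original transformers source) converts a fastbpe vocab, which marks word-internal pieces with a trailing `@@`, into FSMT's convention of marking word-final pieces with `</w>`, while keeping the four special tokens unchanged. A self-contained sketch of the same transform; the function name here is my own.

import re


def rewrite_bpe_keys(d: dict) -> dict:
    # Drop the "@@" continuation marker, append "</w>" to word-final pieces.
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
    # Special tokens keep their plain form.
    for k in "<s> <pad> </s> <unk>".split():
        del out[f"{k}</w>"]
        out[k] = d[k]
    return out


vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
print(rewrite_bpe_keys(vocab))
# {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}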
330
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
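The attention-shape assertions in the TimeSformer test above encode its divided space-time attention: spatial attention runs once per frame, so the batch and time axes fold into one, and each attention map covers the per-frame patches plus a CLS slot. A small arithmetic check, reading the tester's positional defaults as batch_size=13, num_frames=2, image_size=10, patch_size=2, num_attention_heads=4.

batch_size, num_frames, image_size, patch_size, num_heads = 13, 2, 10, 2, 4

patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * patches_per_frame + 1  # 51 tokens; the +1 is the CLS token

# Shape asserted by the test: (batch * frames) x heads x (patches + 1) x (patches + 1)
spatial_attention_shape = (
    batch_size * num_frames,
    num_heads,
    seq_length // num_frames + 1,
    seq_length // num_frames + 1,
)
assert spatial_attention_shape == (26, 4, 26, 26)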
330
1
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
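Two quick sanity checks for the helpers above; a full solution() call sums every prime below two million, so only small cases are exercised here.

from itertools import islice

assert list(islice(prime_generator(), 6)) == [2, 3, 5, 7, 11, 13]
assert solution(10) == 2 + 3 + 5 + 7  # primes below 10 sum to 17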
330
def is_automorphic_number(number: int) -> bool:
    # An automorphic number is one whose square ends in the number itself,
    # e.g. 5 -> 25, 76 -> 5776, 376 -> 141376.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
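A few reference values for the checker above; each square visibly ends in the original number.

for n, square in [(5, 25), (6, 36), (25, 625), (76, 5_776), (376, 141_376)]:
    assert is_automorphic_number(n)
    assert str(square).endswith(str(n))

assert not is_automorphic_number(7)  # 49 does not end in 7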
330
1
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def lowerCamelCase ( self ): '''simple docstring''' return TaConfig.from_pretrained('''google/umt5-base''' ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ): '''simple docstring''' if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing 
if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase ( self ): '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCamelCase ( self ): '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass __lowerCamelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) 
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) __lowerCamelCase ,__lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(__UpperCAmelCase )['''last_hidden_state'''] __lowerCamelCase = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() __lowerCamelCase = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) lowerCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else () lowerCAmelCase__ = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = True lowerCAmelCase__ = True # The small UMT5 model needs higher percentages for CPU/MP tests lowerCAmelCase__ = [0.8, 0.9] def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , 
config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def lowerCamelCase ( self ): '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = model.generate(input_ids.to(__UpperCAmelCase ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
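The use_cache path exercised by the UMT5 tester above checks the standard key/value-cache invariant: decoding one extra token against `past_key_values` must reproduce the corresponding slice of a full forward pass. A hedged sketch of that property as a standalone helper; the function name and the way the decoder is obtained are my own, while the call pattern mirrors the tester.

import torch


def kv_cache_matches_full_pass(decoder, input_ids, next_tokens, atol=1e-3) -> bool:
    # Full pass over the concatenated sequence.
    full = decoder(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    # One cached step, reusing the key/value states computed for the prefix.
    past = decoder(input_ids, use_cache=True).past_key_values
    step = decoder(next_tokens, past_key_values=past).last_hidden_state
    # The cached step must reproduce the last position of the full pass.
    return bool(torch.allclose(full[:, -1], step[:, 0], atol=atol))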
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
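# The comparison pattern used by the tests above, in isolation: run the Flax
# UNet once and print the small flattened output slice that the expected
# vectors are asserted against. Network access to the public
# "CompVis/stable-diffusion-v1-4" checkpoint is assumed; zero inputs are used
# purely for brevity.
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet"
)
latents = jnp.zeros((4, 4, 64, 64))
encoder_hidden_states = jnp.zeros((4, 77, 768))

sample = model.apply(
    {"params": params},
    latents,
    jnp.array(4, dtype=jnp.int32),
    encoder_hidden_states=encoder_hidden_states,
).sample

assert sample.shape == latents.shape
print(sample[-1, -2:, -2:, :2].flatten())  # the slice compared against fixtures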
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
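# Typical use of the tiling path implemented above; the checkpoint id is an
# assumption. Once tiling is enabled, encode()/decode() transparently fall back
# to tiled_encode()/tiled_decode() for inputs larger than the tile size, which
# bounds peak memory on large images.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()

with torch.no_grad():
    image = torch.randn(1, 3, 1024, 1024)  # large enough to trigger tiling
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
print(reconstruction.shape)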
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
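# The deferred-import idea behind _LazyModule, reduced to its core. This is an
# illustrative reimplementation, not the transformers class: attribute access
# looks up which submodule owns the requested name and imports it on first use.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert the mapping: "MMBTConfig" -> "configuration_mmbt", etc.
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._object_to_module[attr]}", self.__name__)
        return getattr(module, attr)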
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two implicant strings that differ in at most one position,
    # replacing the differing position with "_"; otherwise return False.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pick implicants that are the only cover for some minterm column.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
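# A non-interactive walk-through of the same pipeline; the minterm set {1, 3, 5}
# over three variables is an arbitrary example, and the float minterms mirror
# the (unusual) upstream convention above.
binary = decimal_to_binary(3, [1.0, 3.0, 5.0])
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print("Essential prime implicants:", selection(chart, prime_implicants))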
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
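# The adapter idea behind LoRALayer above, in isolation (illustrative names,
# not the transformers or PEFT API): freeze the pretrained linear layer and
# learn only a low-rank residual. Zero-initialising the up-projection makes the
# wrapped layer an exact no-op at the start of training.
import torch
import torch.nn as nn


class LowRankAdapter(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 16):
        super().__init__()
        self.base = base
        self.base.weight.requires_grad_(False)  # frozen pretrained weight
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)  # start as an exact no-op

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.up(self.down(x))


layer = LowRankAdapter(nn.Linear(64, 64))
x = torch.randn(2, 64)
assert torch.allclose(layer(x), layer.base(x))  # holds only at initialisation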
def prime_sieve_eratosthenes(num: int) -> list[int]:
    # Sieve of Eratosthenes: mark multiples of each prime p starting at p * p.
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
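# The key-renaming machinery above, in isolation: (tf_name, hf_name) substring
# patterns are applied sequentially to each TF variable name. The sample key is
# made up; the three patterns are taken from the tables at the top of the file.
def demo_rename(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
print(demo_rename("encoder/layer_0/attention/kernel", demo_patterns))
# -> encoder.layers.0.attention.weight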
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    # Collapse duplicate entries (same `local` key) and sort models by title.
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
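# A tiny, I/O-free demonstration of clean_model_doc_toc; the entries are
# invented. Duplicate "local" keys collapse to a single entry and the result
# is sorted by lower-cased title.
example = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
print(clean_model_doc_toc(example))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]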
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
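# Hedged usage sketch (not part of the original module): a minimal driver for the
# pipeline above, meant to live in a separate script since this module uses relative
# imports. Assumes `transformers` is installed and a conversational checkpoint such as
# "facebook/blenderbot_small-90M" can be downloaded; the prompts are illustrative.
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="facebook/blenderbot_small-90M")
#     conversation = Conversation("What is the best way to learn Python?")
#     conversation = chatbot(conversation)            # preprocess -> generate -> postprocess
#     print(conversation.generated_responses[-1])
#     conversation.add_user_input("Any book recommendations?")  # queue the next turn
#     conversation = chatbot(conversation)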
import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number.

    The sequence starts 3, 5, 9, 13, 17, 25, ...

    >>> proth(1)
    3
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of "blocks" to generate: block k contributes the Proth numbers
        # of the form odd * 2 ** (k + 1) + 1.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
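# Hedged cross-check sketch (not part of the original file): Proth numbers can also be
# generated straight from the definition k * 2**n + 1 with odd k < 2**n, which gives an
# independent test of `proth` above. The helper name is illustrative.
def _proth_by_definition(limit: int) -> list:
    # Collect every Proth number up to `limit`, in ascending order.
    values = set()
    n = 1
    while 2**n + 1 <= limit:
        for k in range(1, 2**n, 2):  # odd k strictly below 2**n
            value = k * 2**n + 1
            if value <= limit:
                values.add(value)
        n += 1
    return sorted(values)


if __name__ == "__main__":
    # The first six Proth numbers are 3, 5, 9, 13, 17, 25 under both constructions.
    assert _proth_by_definition(25) == [proth(i) for i in range(1, 7)]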
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
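# Hedged usage sketch (not part of the original script): programmatic conversion of the
# sample AESLC checkpoint referenced above. Paths are illustrative, and a TF Pegasus
# checkpoint must already exist at `tf_ckpt_path`; note that the checkpoint's parent
# directory name ("aeslc" here) selects the task-specific parameters.
#
#     tf_ckpt_path = "./ckpt/aeslc/model.ckpt-32000"
#     convert_pegasus_ckpt_to_pytorch(tf_ckpt_path, "./pegasus/aeslc")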
import argparse from collections import defaultdict def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : int ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(_UpperCamelCase ,'''r''' ) as f: __lowerCamelCase = f.readlines() __lowerCamelCase = F"""class {class_name}(""" __lowerCamelCase = F"""{4 * " "}def {test_name}(""" __lowerCamelCase = F"""{8 * " "}{correct_line.split()[0]}""" __lowerCamelCase = F"""{16 * " "}{correct_line.split()[0]}""" __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = [] for line in lines: if line.startswith(_UpperCamelCase ): __lowerCamelCase = True elif in_class and line.startswith(_UpperCamelCase ): __lowerCamelCase = True elif in_class and in_func and (line.startswith(_UpperCamelCase ) or line.startswith(_UpperCamelCase )): __lowerCamelCase = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: __lowerCamelCase = True if in_class and in_func and in_line: if ")" not in line: continue else: __lowerCamelCase = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) __lowerCamelCase = __lowerCamelCase = __lowerCamelCase = __lowerCamelCase = False else: new_lines.append(_UpperCamelCase ) with open(_UpperCamelCase ,'''w''' ) as f: for line in new_lines: f.write(_UpperCamelCase ) def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict=None ): if fail is not None: with open(_UpperCamelCase ,'''r''' ) as f: __lowerCamelCase = {l.strip() for l in f.readlines()} else: __lowerCamelCase = None with open(_UpperCamelCase ,'''r''' ) as f: __lowerCamelCase = f.readlines() __lowerCamelCase = defaultdict(_UpperCamelCase ) for line in correct_lines: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) a_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
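# Hedged usage sketch (not part of the original script): --correct_filename is read line
# by line and split on ";", so each record is expected to look like
#     file;class_name;test_name;correct_line
# e.g. (illustrative values only):
#     tests/test_modeling_foo.py;FooModelTest;test_forward;expected_slice = torch.tensor([0.1, 0.2])
# Only tests listed in --fail_filename (as file::class::test) are overwritten when it is
# given. Invocation, with an illustrative script name:
#     python overwrite_expected_outputs.py --correct_filename corrections.txt --fail_filename failures.txt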
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
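# Hedged usage sketch (not part of the original script): converting a pretrained (not
# fine-tuned) fairseq UniSpeech-SAT checkpoint. Every path and the script name are
# illustrative. `--dict_path` is still required here, since its parent directory is
# passed to fairseq as the `data` override when the checkpoint is loaded.
#
#     python convert_unispeech_sat_checkpoint.py \
#         --checkpoint_path ./unispeech_sat_base.pt \
#         --dict_path ./dict/dict.ltr.txt \
#         --pytorch_dump_folder_path ./unispeech-sat-base \
#         --not_finetuned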
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __lowerCAmelCase : lowerCAmelCase__ = BlenderbotSmallConfig lowerCAmelCase__ = {} lowerCAmelCase__ = """gelu""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = bos_token_id def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowerCamelCase = prepare_blenderbot_small_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TFBlenderbotSmallModel(config=__UpperCAmelCase ).get_decoder() __lowerCamelCase = inputs_dict['''input_ids'''] __lowerCamelCase = input_ids[:1, :] __lowerCamelCase = inputs_dict['''attention_mask'''][:1, :] __lowerCamelCase = inputs_dict['''head_mask'''] __lowerCamelCase = 1 # first forward pass __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) __lowerCamelCase ,__lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = 
ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx] __lowerCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : List[str] ,_UpperCamelCase : Dict ,_UpperCamelCase : Dict=None ,_UpperCamelCase : Union[str, Any]=None ,_UpperCamelCase : int=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : str=None ,): if attention_mask is None: __lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __lowerCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) lowerCAmelCase__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TFBlenderbotSmallModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_tokenizers @require_tf class __lowerCAmelCase ( unittest.TestCase ): lowerCAmelCase__ = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] lowerCAmelCase__ = """facebook/blenderbot_small-90M""" @cached_property def lowerCamelCase ( self ): '''simple docstring''' # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.src_text , return_tensors='''tf''' ) __lowerCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) __lowerCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
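# Hedged usage note (not part of the original test file): the integration test above is
# decorated with @slow, so it is skipped unless slow tests are enabled via the RUN_SLOW
# environment variable; the test file path below is illustrative.
#
#     RUN_SLOW=1 python -m pytest tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py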
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
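# Hedged usage sketch (not part of the original module): a minimal driver for the
# pipeline above, meant to live in a separate script since this module uses relative
# imports. Assumes `transformers` is installed and a depth checkpoint such as
# "Intel/dpt-large" can be downloaded; the image URL is the COCO sample commonly used
# in the transformers docs.
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"].save("depth.png")       # PIL image, rescaled to 0-255
#     print(outputs["predicted_depth"].shape)  # raw depth tensor from the model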