Dataset schema:

| column                  | dtype  | range             |
|-------------------------|--------|-------------------|
| code                    | string | lengths 86–54.5k  |
| code_codestyle          | int64  | 0–371             |
| style_context           | string | lengths 87–49.2k  |
| style_context_codestyle | int64  | 0–349             |
| label                   | int64  | 0–1               |
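The columns above describe pairs of code snippets scored for style, with a binary label per row. As a minimal sketch of how rows with this schema would be read with the `datasets` library (the repository id below is a hypothetical placeholder, not the actual dataset name):

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

for row in ds.select(range(2)):
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # first characters of the code snippet
```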
"""simple docstring""" import argparse import os import re SCREAMING_SNAKE_CASE : Union[str, Any] = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""") def lowercase ( _snake_case : Any , _snake_case : bool = False ) ->Tuple: """simple docstring""" with open(_snake_case , '''r''' , encoding='''utf-8''' ) as f: __snake_case : str = f.read() __snake_case : List[Any] = content.split('''\n''' ) __snake_case : str = [] __snake_case : List[Any] = 0 while line_idx < len(_snake_case ): if _re_intro_mapping.search(lines[line_idx] ) is not None: __snake_case : List[str] = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 __snake_case : str = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": __snake_case : Optional[Any] = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers __snake_case : Optional[int] = sorted(_snake_case , key=lambda _snake_case : _re_identifier.search(_snake_case ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(_snake_case ) ) elif "\n".join(_snake_case ) != content: return True def lowercase ( _snake_case : bool = False ) ->int: """simple docstring""" __snake_case : Union[str, Any] = [os.path.join(_snake_case , _snake_case ) for f in os.listdir(_snake_case ) if f.endswith('''.py''' )] __snake_case : Optional[Any] = [sort_auto_mapping(_snake_case , overwrite=_snake_case ) for fname in fnames] if not overwrite and any(_snake_case ): __snake_case : List[str] = [f for f, d in zip(_snake_case , _snake_case ) if d] raise ValueError( f"""The following files have auto mappings that need sorting: {', '.join(_snake_case )}. Run `make style` to fix""" ''' this.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
code_codestyle: 24
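As a quick illustration of the two regular expressions in the script above (the sample lines are modeled on the auto-mapping files, not taken from them):

```python
import re

_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

# A mapping introduction line is detected by its name and OrderedDict call.
assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
# The sort key is the first quoted string of a mapping entry.
assert _re_identifier.search('        ("albert", "AlbertModel"),').groups()[0] == "albert"
```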
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
style_context_codestyle: 24
label: 1
"""simple docstring""" import re def lowercase ( _snake_case : str ) ->list: """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowercase ( _snake_case : str ) ->str: """simple docstring""" __snake_case : Optional[int] = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowercase ( _snake_case : str , _snake_case : bool , _snake_case : str ) ->str: """simple docstring""" try: __snake_case : Optional[int] = split_input(_snake_case ) if upper: __snake_case : Union[str, Any] = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __snake_case : Tuple = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowercase ( _snake_case : str ) ->str: """simple docstring""" return to_simple_case(_snake_case ) def lowercase ( _snake_case : str ) ->str: """simple docstring""" try: __snake_case : Tuple = to_simple_case(_snake_case ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowercase ( _snake_case : str , _snake_case : bool ) ->str: """simple docstring""" return to_complex_case(_snake_case , _snake_case , '''_''' ) def lowercase ( _snake_case : str , _snake_case : bool ) ->str: """simple docstring""" return to_complex_case(_snake_case , _snake_case , '''-''' ) if __name__ == "__main__": __import__("""doctest""").testmod()
code_codestyle: 24
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
style_context_codestyle: 24
label: 1
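For example, [1, 6, 11, 5] splits into {1, 6, 5} and {11}, giving sums 12 and 11, so the minimum difference is 1:

```python
assert find_min([1, 6, 11, 5]) == 1
assert find_min([5, 5, 5, 5]) == 0  # an even split is possible
```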
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
code_codestyle: 24
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
style_context_codestyle: 24
label: 1
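The real root of x³ − 2x − 5 is approximately 2.0945515, which the bisection above reaches to within its 10⁻⁷ interval tolerance:

```python
root = bisection(f, 1, 1000)
assert abs(root - 2.0945515) < 1e-4
assert abs(f(root)) < 1e-4  # the function value at the returned point is near zero
```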
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase ( ) ->Optional[int]: """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__(self ): '''simple docstring''' super().__init__() __snake_case : Optional[int] = nn.Linear(3 , 4 ) __snake_case : str = nn.BatchNormad(4 ) __snake_case : List[str] = nn.Linear(4 , 5 ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(a_ ) ) ) class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(a_ ): nonlocal batch_sizes batch_sizes.append(a_ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(a_ , [1_28, 64, 32, 16, 8] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(a_ , a_ ): nonlocal batch_sizes batch_sizes.append(a_ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __snake_case , __snake_case : Optional[int] = mock_training_loop_function('''hello''' ) self.assertListEqual(a_ , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(a_ ): pass with self.assertRaises(a_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(a_ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(a_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(a_ , a_ , a_ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(a_ ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(a_ ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(a_ ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = torch.cuda.memory_allocated() __snake_case : Optional[int] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , a_ ) __snake_case : List[str] = release_memory(a_ ) self.assertEqual(torch.cuda.memory_allocated() , a_ )
code_codestyle: 24
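Outside the test suite, the decorator is used the same way on a real training loop: it injects `batch_size` and halves it whenever a CUDA out-of-memory error is raised. A minimal sketch, with `train_one_epoch` standing in for user code:

```python
from accelerate.utils.memory import find_executable_batch_size


def train_one_epoch(batch_size):
    # Placeholder for a real training step.
    print(f"training with batch size {batch_size}")


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # batch_size is supplied by the decorator and shrinks on CUDA OOM errors.
    train_one_epoch(batch_size)


train()  # called without the batch_size argument
```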
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 24
label: 1
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowercase ( _snake_case : List[Any] , _snake_case : Any , _snake_case : Optional[Any] ) ->Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = OmegaConf.load(_snake_case ) __snake_case : Tuple = torch.load(_snake_case , map_location='''cpu''' )['''model'''] __snake_case : Any = list(state_dict.keys() ) # extract state_dict for VQVAE __snake_case : Dict = {} __snake_case : Any = '''first_stage_model.''' for key in keys: if key.startswith(_snake_case ): __snake_case : Tuple = state_dict[key] # extract state_dict for UNetLDM __snake_case : str = {} __snake_case : List[Any] = '''model.diffusion_model.''' for key in keys: if key.startswith(_snake_case ): __snake_case : Dict = state_dict[key] __snake_case : Any = config.model.params.first_stage_config.params __snake_case : Dict = config.model.params.unet_config.params __snake_case : List[str] = VQModel(**_snake_case ).eval() vqvae.load_state_dict(_snake_case ) __snake_case : str = UNetLDMModel(**_snake_case ).eval() unet.load_state_dict(_snake_case ) __snake_case : str = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_snake_case , ) __snake_case : int = LDMPipeline(_snake_case , _snake_case , _snake_case ) pipeline.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
code_codestyle: 24
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
style_context_codestyle: 24
label: 1
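A typical round trip through the processor, assuming the `BAAI/AltCLIP` checkpoint this processor class pairs with (the blank test image is just for illustration):

```python
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values
```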
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 24
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
style_context_codestyle: 24
label: 1
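As a usage sketch mirroring the MBart documentation: because of the suffix tokens set above, encoded source text ends with `</s>` followed by the source language code:

```python
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# The last two ids are the </s> token and the en_XX language code.
print(inputs["input_ids"][0][-2:])
```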
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar SCREAMING_SNAKE_CASE : List[Any] = TypeVar("""T""") class _UpperCAmelCase ( Generic[T] ): '''simple docstring''' def __init__(self , a_ , a_ ): '''simple docstring''' __snake_case : Any | T = None __snake_case : int = len(a_ ) __snake_case : list[T] = [any_type for _ in range(self.N )] + arr __snake_case : Dict = fnc self.build() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for p in range(self.N - 1 , 0 , -1 ): __snake_case : Optional[int] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' p += self.N __snake_case : List[str] = v while p > 1: __snake_case : Any = p // 2 __snake_case : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): # noqa: E741 '''simple docstring''' __snake_case , __snake_case : str = l + self.N, r + self.N __snake_case : T | None = None while l <= r: if l % 2 == 1: __snake_case : Optional[Any] = self.st[l] if res is None else self.fn(a_ , self.st[l] ) if r % 2 == 0: __snake_case : Any = self.st[r] if res is None else self.fn(a_ , self.st[r] ) __snake_case , __snake_case : str = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce SCREAMING_SNAKE_CASE : Optional[Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] SCREAMING_SNAKE_CASE : int = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } SCREAMING_SNAKE_CASE : Dict = SegmentTree(test_array, min) SCREAMING_SNAKE_CASE : Dict = SegmentTree(test_array, max) SCREAMING_SNAKE_CASE : List[str] = SegmentTree(test_array, lambda a, b: a + b) def lowercase ( ) ->None: """simple docstring""" for i in range(len(_snake_case ) ): for j in range(_snake_case , len(_snake_case ) ): __snake_case : List[Any] = reduce(_snake_case , test_array[i : j + 1] ) __snake_case : Any = reduce(_snake_case , test_array[i : j + 1] ) __snake_case : List[Any] = reduce(lambda _snake_case , _snake_case : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(_snake_case , _snake_case ) assert max_range == max_segment_tree.query(_snake_case , _snake_case ) assert sum_range == sum_segment_tree.query(_snake_case , _snake_case ) test_all_segments() for index, value in test_updates.items(): SCREAMING_SNAKE_CASE : Union[str, Any] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
code_codestyle: 24
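Because the combining function is arbitrary, the same class covers any associative operation, for example gcd:

```python
import math

gcd_tree = SegmentTree([12, 18, 24, 36], math.gcd)
assert gcd_tree.query(0, 3) == 6  # gcd(12, 18, 24, 36)
assert gcd_tree.query(1, 2) == 6  # gcd(18, 24)
```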
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
style_context_codestyle: 24
label: 1
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowercase ( ) ->List[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_snake_case ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def lowercase ( ) ->Optional[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def lowercase ( ) ->List[Any]: """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_snake_case ): http_head('''https://huggingface.co''' )
code_codestyle: 24
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
style_context_codestyle: 24
label: 1
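A quick sanity check of the derived attributes, assuming the reconstructed defaults above:

```python
config = GPTSanJapaneseConfig()
assert config.num_layers == 10      # num_switch_layers + num_ext_layers
assert config.hidden_size == 1024   # resolved to d_model via attribute_map
```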
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__) SCREAMING_SNAKE_CASE : Any = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) SCREAMING_SNAKE_CASE : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) }, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__snake_case )}, ) lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) }, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) lowerCamelCase__ =field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) }, ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'The input training data file (a text file).'} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'}, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'}, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowerCamelCase__ =field( default=5, metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' }, ) lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated. Default to the max input length of the model.' ) }, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'The number of processes to use for the preprocessing.'}, ) lowerCamelCase__ =field( default=0.1_5, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.train_file is not None: __snake_case : str = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __snake_case : int = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def lowercase ( _snake_case : Optional[Any] , _snake_case : str ) ->List[Any]: """simple docstring""" with open(_snake_case , '''r''' , encoding='''utf-8''' ) as f: __snake_case : Optional[int] = [json.loads(_snake_case ) for line in f.read().splitlines() if (len(_snake_case ) > 0 and not line.isspace())] assert len(_snake_case ) == len(_snake_case ) __snake_case : int = {c: dataset[c] for c in dataset.column_names} __snake_case : Optional[int] = refs return Dataset.from_dict(_snake_case ) def lowercase ( ) ->str: """simple docstring""" __snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
__snake_case , __snake_case , __snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : Tuple = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __snake_case : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _snake_case ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
__snake_case : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
    __snake_case : Optional[int] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
    __snake_case : Optional[Any] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
    __snake_case : Dict = {}
    if data_args.train_file is not None:
        __snake_case : int = data_args.train_file
    if data_args.validation_file is not None:
        __snake_case : Dict = data_args.validation_file
    __snake_case : str = data_args.train_file.split('''.''' )[-1]
    if extension == "txt":
        __snake_case : Optional[int] = '''text'''
    __snake_case : Any = load_dataset(_snake_case , data_files=_snake_case )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Dict = {
    '''cache_dir''': model_args.cache_dir,
    '''revision''': model_args.model_revision,
    '''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
    __snake_case : Tuple = AutoConfig.from_pretrained(model_args.config_name , **_snake_case )
elif model_args.model_name_or_path:
    __snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
    __snake_case : int = CONFIG_MAPPING[model_args.model_type]()
    logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.config_overrides is not None:
        logger.info(f"""Overriding config: {model_args.config_overrides}""" )
        config.update_from_string(model_args.config_overrides )
        logger.info(f"""New config: {config}""" )

__snake_case : Optional[int] = {
    '''cache_dir''': model_args.cache_dir,
    '''use_fast''': model_args.use_fast_tokenizer,
    '''revision''': model_args.model_revision,
    '''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
    __snake_case : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_snake_case )
elif model_args.model_name_or_path:
    __snake_case : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )

if model_args.model_name_or_path:
    __snake_case : Any = AutoModelForMaskedLM.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
    logger.info('''Training new model from scratch''' )
    __snake_case : Tuple = AutoModelForMaskedLM.from_config(_snake_case )

model.resize_token_embeddings(len(_snake_case ) )

# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
    __snake_case : int = datasets['''train'''].column_names
else:
    __snake_case : Tuple = datasets['''validation'''].column_names
__snake_case : str = '''text''' if '''text''' in column_names else column_names[0]

__snake_case : Optional[Any] = '''max_length''' if data_args.pad_to_max_length else False

def tokenize_function(_snake_case : Dict ):
    # Remove empty lines
    __snake_case : Optional[int] = [line for line in examples['''text'''] if len(_snake_case ) > 0 and not line.isspace()]
    return tokenizer(examples['''text'''] , padding=_snake_case , truncation=_snake_case , max_length=data_args.max_seq_length )

__snake_case : List[str] = datasets.map(
    _snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )

# Add the chinese references if provided
if data_args.train_ref_file is not None:
    __snake_case : Tuple = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
    __snake_case : Dict = add_chinese_references(
        tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__snake_case : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
    __snake_case : Optional[int] = False

# Data collator
# This one will take care of randomly masking the tokens.
__snake_case : int = DataCollatorForWholeWordMask(tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )

# Initialize our Trainer
__snake_case : List[str] = Trainer(
    model=_snake_case , args=_snake_case , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )

# Training
if training_args.do_train:
    if last_checkpoint is not None:
        __snake_case : str = last_checkpoint
    elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
        __snake_case : str = model_args.model_name_or_path
    else:
        __snake_case : List[str] = None
    __snake_case : Tuple = trainer.train(resume_from_checkpoint=_snake_case )
    trainer.save_model()  # Saves the tokenizer too for easy upload
    __snake_case : List[Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
    if trainer.is_world_process_zero():
        with open(_snake_case , '''w''' ) as writer:
            logger.info('''***** Train results *****''' )
            for key, value in sorted(train_result.metrics.items() ):
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
        # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
        trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )

# Evaluation
__snake_case : str = {}
if training_args.do_eval:
    logger.info('''*** Evaluate ***''' )
    __snake_case : Optional[int] = trainer.evaluate()
    __snake_case : int = math.exp(eval_output['''eval_loss'''] )
    __snake_case : Any = perplexity
    __snake_case : int = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
    if trainer.is_world_process_zero():
        with open(_snake_case , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in sorted(results.items() ):
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )

return results


def lowercase ( _snake_case : Optional[int] ) ->str:
    """simple docstring"""
    main()


if __name__ == "__main__":
    main()
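One detail worth calling out in the evaluation block above: the reported metric is perplexity, which is simply the exponential of the mean eval loss returned by trainer.evaluate(). A minimal sketch of that step, with a hypothetical loss value rather than one taken from the sample:

import math

eval_loss = 2.08  # hypothetical value, standing in for eval_output["eval_loss"]
perplexity = math.exp(eval_loss)  # exp(2.08) ~= 8.00
print(f"perplexity = {perplexity:.2f}")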
24
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
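The trickiest step in the layer-loading function above is resolving the "*" wildcard in a mapped key: the fairseq parameter name is split on the mapping key, and the layer index is recovered from the prefix. A standalone sketch of just that string manipulation (the example name is illustrative, not taken from the sample):

# Resolve the "*" placeholder in a mapped key from a fairseq parameter name.
name = "encoder.layers.3.self_attn.k_proj.weight"  # illustrative fairseq name
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"

layer_index = name.split(key)[0].split(".")[-2]  # "encoder.layers.3." -> "3"
print(mapped_key.replace("*", layer_index))      # encoder.layers.3.attention.k_proj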
24
1
"""simple docstring""" import math def lowercase ( _snake_case : int ) ->int: """simple docstring""" if not isinstance(_snake_case , _snake_case ): __snake_case : Tuple = f"""Input value of [number={number}] must be an integer""" raise TypeError(_snake_case ) if number < 1: __snake_case : Any = f"""Input value of [number={number}] must be > 0""" raise ValueError(_snake_case ) elif number == 1: return 3 elif number == 2: return 5 else: __snake_case : Any = int(math.log(number // 3 , 2 ) ) + 2 __snake_case : Tuple = [3, 5] __snake_case : Tuple = 2 __snake_case : Union[str, Any] = 3 for block in range(1 , _snake_case ): for _ in range(_snake_case ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): SCREAMING_SNAKE_CASE : Dict = 0 try: SCREAMING_SNAKE_CASE : List[str] = proth(number) except ValueError: print(F'ValueError: there is no {number}th Proth number') continue print(F'The {number}th Proth number: {value}')
24
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
1
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" def decorator(_snake_case : List[str] ): __snake_case : str = getattr(_snake_case , '''handle_key''' , [] ) handle += [key] setattr(_snake_case , '''handle_key''' , _snake_case ) return func return decorator def lowercase ( *_snake_case : List[str] ) ->Dict: """simple docstring""" def decorator(_snake_case : int ): __snake_case : List[str] = getattr(_snake_case , '''handle_key''' , [] ) handle += keys setattr(_snake_case , '''handle_key''' , _snake_case ) return func return decorator class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __new__(cls , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = super().__new__(cls , a_ , a_ , a_ ) if not hasattr(a_ , '''key_handler''' ): setattr(a_ , '''key_handler''' , {} ) setattr(a_ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): __snake_case : Optional[int] = getattr(a_ , '''handle_key''' , [] ) for key in handled_keys: __snake_case : Tuple = value return new_cls @staticmethod def SCREAMING_SNAKE_CASE (cls ): '''simple docstring''' __snake_case : int = get_character() if char != KEYMAP["undefined"]: __snake_case : Optional[int] = ord(a_ ) __snake_case : List[Any] = cls.key_handler.get(a_ ) if handler: __snake_case : str = char return handler(cls ) else: return None def lowercase ( cls : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
1
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ) ->np.ndarray: """simple docstring""" if (ksize % 2) == 0: __snake_case : int = ksize + 1 __snake_case : Optional[int] = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(_snake_case ): for x in range(_snake_case ): # distance from center __snake_case : List[Any] = x - ksize // 2 __snake_case : Tuple = y - ksize // 2 # degree to radiant __snake_case : Union[str, Any] = theta / 180 * np.pi __snake_case : Dict = np.cos(_theta ) __snake_case : Any = np.sin(_theta ) # get kernel x __snake_case : Optional[Any] = cos_theta * px + sin_theta * py # get kernel y __snake_case : Union[str, Any] = -sin_theta * px + cos_theta * py # fill kernel __snake_case : int = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image SCREAMING_SNAKE_CASE : Tuple = imread("""../image_data/lena.jpg""") # turn image in gray scale value SCREAMING_SNAKE_CASE : str = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: SCREAMING_SNAKE_CASE : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) SCREAMING_SNAKE_CASE : int = out / out.max() * 255 SCREAMING_SNAKE_CASE : Optional[Any] = out.astype(np.uinta) imshow("""Original""", gray) imshow("""Gabor filter with 20x20 mask and 6 directions""", out) waitKey(0)
24
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
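The tests above all revolve around Accelerator.prepare() wrapping each training component. A minimal CPU-only round-trip in the same spirit as the create_components() helper, runnable outside the test harness:

import torch
from accelerate import Accelerator

# CPU-only so it runs anywhere; mirrors the Linear/AdamW pair used in the tests.
accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)
print(type(model).__name__, type(optimizer).__name__)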
24
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = 1 __snake_case : Optional[int] = 3 __snake_case : Dict = (32, 32) __snake_case : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a_ ) return image @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' def extract(*a_ , **a_ ): class _UpperCAmelCase : '''simple docstring''' def __init__(self ): '''simple docstring''' __snake_case : Optional[Any] = torch.ones([0] ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' self.pixel_values.to(a_ ) return self return Out() return extract def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : Dict = self.dummy_cond_unet __snake_case : int = PNDMScheduler(skip_prk_steps=a_ ) __snake_case : Tuple = self.dummy_vae __snake_case : Optional[Any] = self.dummy_text_encoder __snake_case : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __snake_case : List[str] = 77 __snake_case : List[str] = self.dummy_image.to(a_ ) __snake_case : Dict = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk __snake_case : Union[str, Any] = AltDiffusionImgaImgPipeline( unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , ) __snake_case : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a_ ) __snake_case : Dict = alt_pipe.to(a_ ) alt_pipe.set_progress_bar_config(disable=a_ ) 
__snake_case : int = '''A painting of a squirrel eating a burger''' __snake_case : Tuple = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case : Dict = alt_pipe( [prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=a_ , ) __snake_case : List[Any] = output.images __snake_case : List[Any] = torch.Generator(device=a_ ).manual_seed(0 ) __snake_case : Tuple = alt_pipe( [prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=a_ , return_dict=a_ , )[0] __snake_case : Union[str, Any] = image[0, -3:, -3:, -1] __snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case : Dict = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.dummy_cond_unet __snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=a_ ) __snake_case : Any = self.dummy_vae __snake_case : List[Any] = self.dummy_text_encoder __snake_case : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __snake_case : Tuple = 77 __snake_case : Optional[Any] = self.dummy_image.to(a_ ) # put models in fp16 __snake_case : List[Any] = unet.half() __snake_case : int = vae.half() __snake_case : Tuple = bert.half() # make sure here that pndm scheduler skips prk __snake_case : Optional[Any] = AltDiffusionImgaImgPipeline( unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , ) __snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a_ ) __snake_case : int = alt_pipe.to(a_ ) alt_pipe.set_progress_bar_config(disable=a_ ) __snake_case : Optional[Any] = '''A painting of a squirrel eating a burger''' __snake_case : Dict = torch.manual_seed(0 ) __snake_case : Any = alt_pipe( [prompt] , generator=a_ , num_inference_steps=2 , output_type='''np''' , image=a_ , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 __snake_case : Optional[Any] = init_image.resize((7_60, 5_04) ) __snake_case : Optional[Any] = '''BAAI/AltDiffusion''' __snake_case : Tuple = AltDiffusionImgaImgPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() __snake_case : List[Any] = '''A fantasy landscape, trending on artstation''' __snake_case : Dict = torch.manual_seed(0 ) __snake_case : Optional[Any] = pipe( prompt=a_ , image=a_ , strength=0.75 , guidance_scale=7.5 , generator=a_ , output_type='''np''' , ) __snake_case : Union[str, Any] = output.images[0] __snake_case : Any = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) __snake_case : Dict = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() 
< 1E-2 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case : Dict = init_image.resize((7_68, 5_12) ) __snake_case : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) __snake_case : Tuple = '''BAAI/AltDiffusion''' __snake_case : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() __snake_case : Optional[Any] = '''A fantasy landscape, trending on artstation''' __snake_case : Optional[Any] = torch.manual_seed(0 ) __snake_case : str = pipe( prompt=a_ , image=a_ , strength=0.75 , guidance_scale=7.5 , generator=a_ , output_type='''np''' , ) __snake_case : List[str] = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
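The assertion pattern used throughout these pipeline tests is fuzzy equality on a small fixed slice of the output: flatten a 3x3 corner of the image, then compare against pinned reference values within an absolute tolerance. A tiny numpy sketch of the pattern (slice values here are illustrative, not reference data):

import numpy as np

expected_slice = np.array([0.4427, 0.3731, 0.4249])  # pinned reference values
image_slice = np.array([0.4430, 0.3735, 0.4250])     # freshly generated values
assert np.abs(image_slice - expected_slice).max() < 5e-3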
24
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
1
"""simple docstring""" from math import factorial SCREAMING_SNAKE_CASE : int = {str(d): factorial(d) for d in range(10)} def lowercase ( _snake_case : int ) ->int: """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(_snake_case ) ) def lowercase ( ) ->int: """simple docstring""" __snake_case : Any = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , _snake_case ) if sum_of_digit_factorial(_snake_case ) == i ) if __name__ == "__main__": print(F'{solution() = }')
24
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SCREAMING_SNAKE_CASE : Tuple = get_tests_dir("""fixtures""") class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = mock.Mock() __snake_case : Union[str, Any] = 5_00 __snake_case : str = {} __snake_case : List[Any] = HTTPError __snake_case : int = {} # Download this model to make sure it's in the cache. __snake_case : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head: __snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = WavaVecaFeatureExtractor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' ) @is_staging_test class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def SCREAMING_SNAKE_CASE (cls ): '''simple docstring''' __snake_case : List[Any] = TOKEN HfFolder.save_token(a_ ) @classmethod def SCREAMING_SNAKE_CASE (cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-feature-extractor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' ) except HTTPError: pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token ) __snake_case : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='''test-feature-extractor''' , push_to_hub=a_ , use_auth_token=self._token ) __snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token ) __snake_case : Any = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo 
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=a_ , use_auth_token=self._token ) __snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() __snake_case : Tuple = CustomFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , ) __snake_case : str = AutoFeatureExtractor.from_pretrained( f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=a_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
24
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class _UpperCAmelCase : '''simple docstring''' def __init__(self ): '''simple docstring''' __snake_case : Tuple = {} def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=1 ): '''simple docstring''' if self.graph.get(a_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __snake_case : Optional[int] = [[w, v]] if not self.graph.get(a_ ): __snake_case : int = [] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return list(self.graph ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if self.graph.get(a_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(a_ ) def SCREAMING_SNAKE_CASE (self , a_=-2 , a_=-1 ): '''simple docstring''' if s == d: return [] __snake_case : str = [] __snake_case : Any = [] if s == -2: __snake_case : Dict = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : Dict = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : int = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(a_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(a_ ) != 0: __snake_case : List[str] = stack[len(a_ ) - 1] else: __snake_case : List[Any] = ss # check if se have reached the starting point if len(a_ ) == 0: return visited def SCREAMING_SNAKE_CASE (self , a_=-1 ): '''simple docstring''' if c == -1: __snake_case : List[Any] = floor(random() * 1_00_00 ) + 10 for i in range(a_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __snake_case : Dict = floor(random() * c ) + 1 if n != i: self.add_pair(a_ , a_ , 1 ) def SCREAMING_SNAKE_CASE (self , a_=-2 ): '''simple docstring''' __snake_case : List[str] = deque() __snake_case : Optional[Any] = [] if s == -2: __snake_case : List[str] = list(self.graph )[0] d.append(a_ ) visited.append(a_ ) while d: __snake_case : Any = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return len(self.graph[u] ) def SCREAMING_SNAKE_CASE (self , a_=-2 ): '''simple docstring''' __snake_case : int = [] __snake_case : Tuple = [] if s == -2: __snake_case : Optional[int] = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : Dict = s __snake_case : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : Any = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Optional[int] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(a_ ) != 0: __snake_case : List[str] = stack[len(a_ ) - 1] else: __snake_case : Dict = ss # check if se have reached the starting point if len(a_ ) == 0: return sorted_nodes def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = [] __snake_case : Optional[int] = [] __snake_case : Dict = list(self.graph )[0] stack.append(a_ ) 
visited.append(a_ ) __snake_case : List[Any] = -2 __snake_case : Optional[int] = [] __snake_case : Union[str, Any] = s __snake_case : List[Any] = False __snake_case : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : Dict = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case : int = len(a_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case : int = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case : Optional[int] = True if len(a_ ) != 0: __snake_case : Tuple = stack[len(a_ ) - 1] else: __snake_case : Optional[int] = False indirect_parents.append(a_ ) __snake_case : List[str] = s __snake_case : int = ss # check if se have reached the starting point if len(a_ ) == 0: return list(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = [] __snake_case : Any = [] __snake_case : Dict = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : int = -2 __snake_case : Optional[int] = [] __snake_case : List[str] = s __snake_case : List[str] = False __snake_case : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : Dict = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case : Union[str, Any] = len(a_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Union[str, Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case : str = True if len(a_ ) != 0: __snake_case : Dict = stack[len(a_ ) - 1] else: __snake_case : Tuple = False indirect_parents.append(a_ ) __snake_case : Optional[int] = s __snake_case : List[str] = ss # check if se have reached the starting point if len(a_ ) == 0: return False def SCREAMING_SNAKE_CASE (self , a_=-2 , a_=-1 ): '''simple docstring''' __snake_case : Union[str, Any] = time() self.dfs(a_ , a_ ) __snake_case : List[str] = time() return end - begin def SCREAMING_SNAKE_CASE (self , a_=-2 ): '''simple docstring''' __snake_case : Any = time() self.bfs(a_ ) __snake_case : Any = time() return end - begin class _UpperCAmelCase : '''simple docstring''' def __init__(self ): '''simple docstring''' __snake_case : Union[str, Any] = {} def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=1 ): '''simple docstring''' if self.graph.get(a_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __snake_case : str = [[w, v]] # add the other way if self.graph.get(a_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist __snake_case : Optional[int] = [[w, u]] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if self.graph.get(a_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(a_ ) # the other way round if self.graph.get(a_ ): for _ in self.graph[v]: if _[1] == u: 
self.graph[v].remove(a_ ) def SCREAMING_SNAKE_CASE (self , a_=-2 , a_=-1 ): '''simple docstring''' if s == d: return [] __snake_case : Tuple = [] __snake_case : int = [] if s == -2: __snake_case : Optional[int] = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : List[Any] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(a_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Optional[int] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(a_ ) != 0: __snake_case : List[str] = stack[len(a_ ) - 1] else: __snake_case : List[Any] = ss # check if se have reached the starting point if len(a_ ) == 0: return visited def SCREAMING_SNAKE_CASE (self , a_=-1 ): '''simple docstring''' if c == -1: __snake_case : Union[str, Any] = floor(random() * 1_00_00 ) + 10 for i in range(a_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __snake_case : Tuple = floor(random() * c ) + 1 if n != i: self.add_pair(a_ , a_ , 1 ) def SCREAMING_SNAKE_CASE (self , a_=-2 ): '''simple docstring''' __snake_case : Tuple = deque() __snake_case : Optional[Any] = [] if s == -2: __snake_case : Optional[int] = list(self.graph )[0] d.append(a_ ) visited.append(a_ ) while d: __snake_case : Optional[int] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return len(self.graph[u] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = [] __snake_case : List[str] = [] __snake_case : Dict = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : List[str] = -2 __snake_case : List[Any] = [] __snake_case : Dict = s __snake_case : List[str] = False __snake_case : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case : Optional[int] = len(a_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Dict = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case : Tuple = True if len(a_ ) != 0: __snake_case : Optional[int] = stack[len(a_ ) - 1] else: __snake_case : Optional[Any] = False indirect_parents.append(a_ ) __snake_case : Optional[Any] = s __snake_case : List[str] = ss # check if se have reached the starting point if len(a_ ) == 0: return list(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = [] __snake_case : Any = [] __snake_case : Optional[int] = list(self.graph )[0] stack.append(a_ ) visited.append(a_ ) __snake_case : List[Any] = -2 __snake_case : Optional[int] = [] __snake_case : Union[str, Any] = s __snake_case : List[str] = False __snake_case : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __snake_case : Any = s for node in self.graph[s]: if ( visited.count(node[1] 
) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case : Any = len(a_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case : Union[str, Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case : List[str] = True if len(a_ ) != 0: __snake_case : Any = stack[len(a_ ) - 1] else: __snake_case : int = False indirect_parents.append(a_ ) __snake_case : Union[str, Any] = s __snake_case : str = ss # check if se have reached the starting point if len(a_ ) == 0: return False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return list(self.graph ) def SCREAMING_SNAKE_CASE (self , a_=-2 , a_=-1 ): '''simple docstring''' __snake_case : Tuple = time() self.dfs(a_ , a_ ) __snake_case : Optional[Any] = time() return end - begin def SCREAMING_SNAKE_CASE (self , a_=-2 ): '''simple docstring''' __snake_case : List[Any] = time() self.bfs(a_ ) __snake_case : List[Any] = time() return end - begin
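The renamed identifiers above (`a_`, `__snake_case`, `SCREAMING_SNAKE_CASE`) make the traversal logic hard to follow, so here is a small de-obfuscated sketch of the same iterative DFS bookkeeping: an explicit stack, an `ss` marker that distinguishes "descended into a child" from "no unvisited child found", and termination when the stack empties. All names here are illustrative stand-ins, not identifiers from the row above.

# Hedged sketch: the same iterative DFS as above, with readable names.
# `graph` maps each node to a list of [weight, neighbour] pairs and every
# node is assumed to be present as a key.
def dfs(graph, start, goal=None):
    stack = [start]
    visited = [start]
    s = start
    ss = start
    while True:
        if len(graph[s]) != 0:
            ss = s
            for _weight, v in graph[s]:
                if v not in visited:
                    if v == goal:
                        visited.append(v)
                        return visited
                    stack.append(v)
                    visited.append(v)
                    ss = v  # remember the child we will descend into
                    break
        if s == ss:  # no unvisited child was found: backtrack one level
            stack.pop()
            if stack:
                s = stack[-1]
        else:  # descend into the child recorded above
            s = ss
        if not stack:  # back past the starting point: traversal done
            return visited


# e.g. dfs({"a": [[1, "b"], [1, "c"]], "b": [], "c": []}, "a") -> ["a", "b", "c"]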
24
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE : Dict = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = ["""BeitFeatureExtractor"""] SCREAMING_SNAKE_CASE : Any = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Any = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
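Distilled from the tests above, a minimal way to run the benchmark outside unittest looks roughly like this (same `transformers` classes; the model id and flag values are just an example configuration):

# Sketch based on the test setup above; flag values are illustrative.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)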
24
1
"""simple docstring""" from manim import * class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Rectangle(height=0.5 , width=0.5 ) __snake_case : int = Rectangle(height=0.25 , width=0.25 ) __snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __snake_case : Optional[int] = [mem.copy() for i in range(6 )] __snake_case : str = [mem.copy() for i in range(6 )] __snake_case : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Union[str, Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 ) __snake_case : Tuple = Text('''CPU''' , font_size=24 ) __snake_case : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a_ ) __snake_case : int = [mem.copy() for i in range(4 )] __snake_case : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : List[Any] = Text('''GPU''' , font_size=24 ) __snake_case : Any = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) gpu.move_to([-1, -1, 0] ) self.add(a_ ) __snake_case : List[str] = [mem.copy() for i in range(6 )] __snake_case : Dict = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Dict = Text('''Model''' , font_size=24 ) __snake_case : int = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) model.move_to([3, -1.0, 0] ) self.add(a_ ) __snake_case : List[str] = [] __snake_case : str = [] __snake_case : str = [] for i, rect in enumerate(a_ ): rect.set_stroke(a_ ) __snake_case : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 ) self.add(a_ ) model_cpu_arr.append(a_ ) self.add(*a_ , *a_ , *a_ ) __snake_case : str = [mem.copy() for i in range(6 )] __snake_case : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Optional[Any] = Text('''Loaded Checkpoint''' , font_size=24 ) __snake_case : Optional[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(a_ ) __snake_case : Tuple = [] __snake_case : Optional[int] = [] for i, rect in enumerate(a_ ): __snake_case : int = fill.copy().set_fill(a_ , opacity=0.7 ) target.move_to(a_ ) ckpt_arr.append(a_ ) __snake_case : Optional[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(a_ ) self.add(*a_ , *a_ ) __snake_case : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __snake_case : Optional[int] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a_ , a_ ) __snake_case : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(a_ ) __snake_case : Tuple = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) __snake_case : Dict = [meta_mem.copy() for i in range(6 )] __snake_case : Optional[int] = [meta_mem.copy() for i in 
range(6 )] __snake_case : Any = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 ) __snake_case : Optional[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 ) __snake_case : List[Any] = Text('''Disk''' , font_size=24 ) __snake_case : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) ) __snake_case : int = [] for i, rect in enumerate(a_ ): __snake_case : Optional[Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(a_ , run_time=1.5 ) ) self.play(*a_ ) self.play(FadeOut(a_ ) ) __snake_case : List[str] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(a_ , run_time=3 ) ) self.play( FadeOut(a_ , a_ , *a_ , *a_ ) , ) self.wait()
24
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
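Note that the renaming above collapses the public class names (the `WindowsFileLock`/`UnixFileLock`/`SoftFileLock` assignments at the bottom refer to classes that are all literally named `_UpperCAmelCase` here). The intended usage pattern, sketched against the conventional `filelock` API this module mirrors:

# Sketch of the intended usage, written against the conventional filelock API.
from filelock import FileLock, Timeout

lock = FileLock("resource.txt.lock", timeout=1)  # seconds to wait before Timeout
try:
    with lock:  # __enter__ calls acquire(), __exit__ calls release()
        with open("resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process is holding the lock")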
24
1
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=32 , a_=3 , a_=4 , a_=[10, 20, 30, 40] , a_=[2, 2, 3, 2] , a_=True , a_=True , a_=37 , a_="gelu" , a_=10 , a_=0.02 , a_=["stage2", "stage3", "stage4"] , a_=3 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : Tuple = batch_size __snake_case : Any = image_size __snake_case : List[Any] = num_channels __snake_case : Union[str, Any] = num_stages __snake_case : Any = hidden_sizes __snake_case : Dict = depths __snake_case : Optional[Any] = is_training __snake_case : Dict = use_labels __snake_case : Tuple = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : List[str] = type_sequence_label_size __snake_case : Tuple = initializer_range __snake_case : int = out_features __snake_case : Tuple = num_labels __snake_case : Optional[Any] = scope __snake_case : Optional[Any] = num_stages def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_labels: __snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=a_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=a_ , loss_ignore_index=2_55 , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() __snake_case : List[Any] = model(a_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): 
'''simple docstring''' lowerCamelCase__ =(UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCamelCase__ ={'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = UperNetModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = model_class(a_ ) __snake_case : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Tuple = [*signature.parameters.keys()] __snake_case : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not have a base model''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip(reason='''UperNet does not have a base model''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' def check_hidden_states_output(a_ , a_ , a_ ): __snake_case : Optional[int] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Any = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : 
Optional[Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Optional[Any] = True check_hidden_states_output(a_ , a_ , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[str] = _config_zero_init(a_ ) __snake_case : int = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: __snake_case : Tuple = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def lowercase ( ) ->Any: """simple docstring""" __snake_case : Dict = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) __snake_case : Union[str, Any] = Image.open(_snake_case ).convert('''RGB''' ) return image @require_torch @require_vision @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) __snake_case : int = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(a_ ) __snake_case : str = prepare_img() __snake_case : Union[str, Any] = processor(images=a_ , return_tensors='''pt''' ).to(a_ ) with torch.no_grad(): __snake_case : Any = model(**a_ ) __snake_case : int = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , a_ ) __snake_case : List[str] = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1E-4 ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) __snake_case : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(a_ ) __snake_case : Dict = prepare_img() __snake_case : Optional[Any] = processor(images=a_ , return_tensors='''pt''' ).to(a_ ) with torch.no_grad(): __snake_case : Optional[int] = model(**a_ ) __snake_case : List[Any] = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , a_ ) __snake_case : Dict = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1E-4 ) )
24
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
24
1
"""simple docstring""" class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = n __snake_case : Dict = [None] * self.n __snake_case : List[str] = 0 # index of the first element __snake_case : Optional[int] = 0 __snake_case : Tuple = 0 def __len__(self ): '''simple docstring''' return self.size def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.size == 0 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return False if self.is_empty() else self.array[self.front] def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.size >= self.n: raise Exception('''QUEUE IS FULL''' ) __snake_case : Tuple = data __snake_case : Any = (self.rear + 1) % self.n self.size += 1 return self def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.size == 0: raise Exception('''UNDERFLOW''' ) __snake_case : int = self.array[self.front] __snake_case : Union[str, Any] = None __snake_case : Tuple = (self.front + 1) % self.n self.size -= 1 return temp
24
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
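# Illustrative addition, not part of the test file above: a minimal sketch of
# exercising the same "distilbert-base-uncased" checkpoint the slow integration
# test loads, assuming the standard `transformers` fill-mask pipeline API and
# network access to the Hub.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
# Each prediction is a dict with "token_str" and "score" for the masked slot.
print(unmasker("Paris is the [MASK] of France.")[0]["token_str"])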
24
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
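# Illustrative addition: a sketch of how the conversion script above is invoked
# (the script filename and all paths are placeholders, not real files):
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model_dir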
24
1
"""simple docstring""" import math SCREAMING_SNAKE_CASE : List[str] = 10 SCREAMING_SNAKE_CASE : Optional[int] = 7 SCREAMING_SNAKE_CASE : int = BALLS_PER_COLOUR * NUM_COLOURS def lowercase ( _snake_case : int = 20 ) ->str: """simple docstring""" __snake_case : Any = math.comb(_snake_case , _snake_case ) __snake_case : Dict = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _snake_case ) __snake_case : List[Any] = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
24
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
1
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
1
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
24
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
1
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def lowercase ( _snake_case : Callable[[int | float], int | float] , _snake_case : int | float , _snake_case : int | float , _snake_case : int = 100 , ) ->float: """simple docstring""" __snake_case : Optional[int] = x_start __snake_case : int = fnc(_snake_case ) __snake_case : List[str] = 0.0 for _ in range(_snake_case ): # Approximates curve as a sequence of linear lines and sums their length __snake_case : int = (x_end - x_start) / steps + xa __snake_case : Dict = fnc(_snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step __snake_case : Tuple = xa __snake_case : Dict = fxa return length if __name__ == "__main__": def lowercase ( _snake_case : List[Any] ) ->Optional[int]: """simple docstring""" return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") SCREAMING_SNAKE_CASE : Union[str, Any] = 10 while i <= 10_0000: print(F'With {i} steps: {line_length(f, -10, 10, i)}') i *= 10
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar("""DatasetType""", Dataset, IterableDataset) def lowercase ( _snake_case : List[DatasetType] , _snake_case : Optional[List[float]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[DatasetInfo] = None , _snake_case : Optional[NamedSplit] = None , _snake_case : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) ->DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_snake_case ): if not isinstance(_snake_case , (Dataset, IterableDataset) ): if isinstance(_snake_case , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ '''is an empty dataset dictionary.''' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(_snake_case )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_snake_case ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_snake_case ).__name__}.""" ) if i == 0: __snake_case , __snake_case : Union[str, Any] = ( (Dataset, IterableDataset) if isinstance(_snake_case , _snake_case ) else (IterableDataset, Dataset) ) elif not isinstance(_snake_case , _snake_case ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"""{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( _snake_case , _snake_case , _snake_case , info=_snake_case , split=_snake_case , stopping_strategy=_snake_case ) else: return _interleave_iterable_datasets( _snake_case , _snake_case , _snake_case , info=_snake_case , split=_snake_case , stopping_strategy=_snake_case ) def lowercase ( _snake_case : List[DatasetType] , _snake_case : Optional[DatasetInfo] = None , _snake_case : Optional[NamedSplit] = None , _snake_case : int = 0 , ) ->DatasetType: """simple docstring""" if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_snake_case ): if not isinstance(_snake_case , (Dataset, IterableDataset) ): if isinstance(_snake_case , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ '''is an empty dataset dictionary.''' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(_snake_case )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_snake_case ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_snake_case ).__name__}.""" ) if i == 0: __snake_case , __snake_case : Dict = ( (Dataset, IterableDataset) if isinstance(_snake_case , _snake_case ) else (IterableDataset, Dataset) ) elif not isinstance(_snake_case , _snake_case ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_snake_case , info=_snake_case , split=_snake_case , axis=_snake_case ) else: return _concatenate_iterable_datasets(_snake_case , info=_snake_case , split=_snake_case , axis=_snake_case )
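# Illustrative addition: a hedged sketch of the public helpers the two functions
# above back, assuming they correspond to `datasets.interleave_datasets` and
# `datasets.concatenate_datasets`.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})
print(interleave_datasets([d1, d2], stopping_strategy="all_exhausted")["x"])  # alternates rows
print(concatenate_datasets([d1, d2])["x"])  # appends rows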
24
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
1
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=None , a_=None , a_=None , a_="resnet50" , a_=3 , a_=32 , a_=3 , a_=True , a_=True , ): '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : int = out_indices if out_indices is not None else [4] __snake_case : int = stage_names __snake_case : Union[str, Any] = out_features __snake_case : List[str] = backbone __snake_case : Union[str, Any] = batch_size __snake_case : Optional[Any] = image_size __snake_case : int = num_channels __snake_case : Optional[int] = use_pretrained_backbone __snake_case : Optional[Any] = is_training def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[Any] = self.get_config() return config, pixel_values def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : int = TimmBackbone(config=a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : Optional[Any] = model(a_ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case : Union[str, Any] = config_and_inputs __snake_case : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(TimmBackbone,) if is_torch_available() else () lowerCamelCase__ ={'feature-extraction': TimmBackbone} if is_torch_available() else {} lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = TimmBackboneModelTester(self ) __snake_case : Optional[int] = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''resnet18''' __snake_case : List[Any] = 
'''microsoft/resnet-18''' __snake_case : List[str] = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ ) __snake_case : Optional[int] = AutoBackbone.from_pretrained(a_ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __snake_case : List[Any] = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] ) __snake_case : Tuple = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Safetensors is not supported by timm.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = 
model_class(a_ ) __snake_case : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : int = [*signature.parameters.keys()] __snake_case : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[Any] = True __snake_case : Optional[int] = self.has_attentions # no need to test all models as different heads yield the same functionality __snake_case : Tuple = self.all_model_classes[0] __snake_case : Dict = model_class(a_ ) model.to(a_ ) __snake_case : Union[str, Any] = self._prepare_for_class(a_ , a_ ) __snake_case : int = model(**a_ ) __snake_case : Optional[int] = outputs[0][-1] # Encoder-/Decoder-only models __snake_case : int = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __snake_case : Tuple = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=a_ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(a_ ) model.to(a_ ) model.eval() __snake_case : str = model(**a_ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __snake_case : Dict = copy.deepcopy(a_ ) __snake_case : List[str] = None __snake_case : List[str] = model_class(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(**a_ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __snake_case : str = copy.deepcopy(a_ ) __snake_case : List[Any] = False __snake_case : Optional[int] = model_class(a_ ) model.to(a_ ) model.eval() __snake_case : str = model(**a_ )
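# Illustrative addition: a hedged sketch of the AutoBackbone usage the tests
# above exercise, mirroring the timm checkpoint name used in the test itself
# (requires `timm` to be installed).
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
features = backbone(torch.randn(1, 3, 224, 224)).feature_maps
print([tuple(f.shape) for f in features])  # one feature map per requested stage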
24
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
24
1
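The row above mirrors transformers' MBartTokenizerFast, whose suffix tokens are [</s>, lang_code]. A minimal usage sketch against the real published checkpoint (assumes the transformers package is installed; `as_target_tokenizer` is the classic target-side context manager):

from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# Source text is suffixed with [</s>, en_XX], matching set_src_lang_special_tokens.
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")

# Target text gets the tgt_lang code instead, via set_tgt_lang_special_tokens.
with tokenizer.as_target_tokenizer():
    labels = tokenizer("Şeful ONU declară că nu există o soluţie militară în Siria", return_tensors="pt")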
"""simple docstring""" SCREAMING_SNAKE_CASE : dict[str, float] = { "joule": 1.0, "kilojoule": 1000, "megajoule": 100_0000, "gigajoule": 10_0000_0000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 360_0000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 418_6800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1055.0_5585, "footpound": 1.355_818, } def lowercase ( _snake_case : str , _snake_case : str , _snake_case : float ) ->float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: __snake_case : Optional[Any] = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {', '.join(_snake_case )}""" ) raise ValueError(_snake_case ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
24
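A quick sanity check of the conversion rule (normalize to joules via the source unit's factor, then divide by the target unit's). This is a self-contained sketch: `energy_conversion` is a hypothetical readable alias for the obfuscated `lowercase` above, and only a slice of the table is reproduced.

ENERGY_CONVERSION = {"joule": 1.0, "kilojoule": 1000, "kilowatthour": 3_600_000}

def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        raise ValueError(f"Incorrect 'from_type' or 'to_type': {from_type!r}, {to_type!r}")
    # value -> joules -> target unit
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]

assert energy_conversion("joule", "kilojoule", 1000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0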
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
1
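The conversion loop in `hans_convert_examples_to_features` above reduces to one tokenizer call per premise/hypothesis pair. A standalone sketch of that call with a stock checkpoint (standard Hugging Face tokenizer API; the two sentences are illustrative HANS-style lexical-overlap examples):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoded = tokenizer(
    "The doctor saw the lawyer.",   # text_a (premise)
    "The lawyer saw the doctor.",   # text_b (hypothesis)
    max_length=128,
    padding="max_length",
    truncation=True,
)
assert len(encoded["input_ids"]) == 128  # padded/truncated to max_length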
"""simple docstring""" from collections.abc import Sequence from queue import Queue class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_=None , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = start __snake_case : Optional[int] = end __snake_case : Optional[Any] = val __snake_case : List[Any] = (start + end) // 2 __snake_case : Union[str, Any] = left __snake_case : str = right def __repr__(self ): '''simple docstring''' return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[Any] = collection __snake_case : Optional[Any] = function if self.collection: __snake_case : Optional[Any] = self._build_tree(0 , len(a_ ) - 1 ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' self._update_tree(self.root , a_ , a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' return self._query_range(self.root , a_ , a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if start == end: return SegmentTreeNode(a_ , a_ , self.collection[start] ) __snake_case : str = (start + end) // 2 __snake_case : Dict = self._build_tree(a_ , a_ ) __snake_case : Optional[int] = self._build_tree(mid + 1 , a_ ) return SegmentTreeNode(a_ , a_ , self.fn(left.val , right.val ) , a_ , a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if node.start == i and node.end == i: __snake_case : str = val return if i <= node.mid: self._update_tree(node.left , a_ , a_ ) else: self._update_tree(node.right , a_ , a_ ) __snake_case : Optional[Any] = self.fn(node.left.val , node.right.val ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , a_ , a_ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , a_ , node.mid ) , self._query_range(node.right , node.mid + 1 , a_ ) , ) else: # range in right child tree return self._query_range(node.right , a_ , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.root is not None: __snake_case : str = Queue() queue.put(self.root ) while not queue.empty(): __snake_case : List[Any] = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("""*""" * 50) SCREAMING_SNAKE_CASE : Union[str, Any] = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
24
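A brute-force cross-check of the tree's inclusive range queries; this sketch assumes the de-obfuscated bindings (`SegmentTree` with `query_range`) that the row's own demo block calls, which the obfuscated class names do not actually provide at runtime.

import operator
import random

data = [random.randint(0, 100) for _ in range(16)]
tree = SegmentTree(data, operator.add)
for _ in range(100):
    i, j = sorted(random.sample(range(16), 2))
    # query_range is inclusive on both ends, hence data[i : j + 1]
    assert tree.query_range(i, j) == sum(data[i : j + 1])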
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
1
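Usage sketch for the config row above. It assumes the class is the one transformers publishes as GPTSanJapaneseConfig (an assumption about this obfuscated entry); the attribute_map behavior shown, where generic names read through to model-specific fields, is standard PretrainedConfig machinery.

from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(d_model=1024, num_heads=8)
assert config.hidden_size == 1024          # aliased to d_model via attribute_map
assert config.num_attention_heads == 8     # aliased to num_heads
config.save_pretrained("./gptsan-config")  # serializes to config.json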
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =ComputeEnvironment.AMAZON_SAGEMAKER lowerCamelCase__ =True lowerCamelCase__ ='ml.p3.2xlarge' lowerCamelCase__ ='accelerate_sagemaker_execution_role' lowerCamelCase__ ='hf-sm' lowerCamelCase__ ='us-east-1' lowerCamelCase__ =1 lowerCamelCase__ ='accelerate-sagemaker-1' lowerCamelCase__ ='1.6' lowerCamelCase__ ='4.4' lowerCamelCase__ ='train.py' lowerCamelCase__ =[ '--model_name_or_path', 'bert', '--do_train', 'False', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] lowerCamelCase__ =[ '--model_name_or_path', 'bert', '--do_train', '--do_test', 'False', '--do_predict', '--epochs', '3', '--learning_rate', '5e-5', '--max_steps', '50.5', ] class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args['''model_name_or_path'''] , a_ ) assert isinstance(converted_args['''do_train'''] , a_ ) assert isinstance(converted_args['''epochs'''] , a_ ) assert isinstance(converted_args['''learning_rate'''] , a_ ) assert isinstance(converted_args['''max_steps'''] , a_ ) with pytest.raises(a_ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
24
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
24
1
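The converter above is built around a MAPPING-driven key rename from fairseq state-dict names to transformers names. A tiny standalone sketch of that substring-rename pattern (the single mapping entry is taken from the table above; the helper name `rename_key` is illustrative):

RENAME_MAP = {"post_extract_proj": "feature_projection.projection"}

def rename_key(name: str) -> str:
    # Replace the first fairseq fragment that matches; leave unknown keys untouched.
    for old, new in RENAME_MAP.items():
        if old in name:
            return name.replace(old, new)
    return name

assert rename_key("w2v_model.post_extract_proj.weight") == (
    "w2v_model.feature_projection.projection.weight"
)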
"""simple docstring""" def lowercase ( _snake_case : Any ) ->Optional[Any]: """simple docstring""" __snake_case : Any = [0] * len(_snake_case ) __snake_case : Dict = [] __snake_case : Union[str, Any] = [] __snake_case : Optional[int] = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_snake_case ) ): if indegree[i] == 0: queue.append(_snake_case ) while queue: __snake_case : Dict = queue.pop(0 ) cnt += 1 topo.append(_snake_case ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_snake_case ) if cnt != len(_snake_case ): print('''Cycle exists''' ) else: print(_snake_case ) # Adjacency List of Graph SCREAMING_SNAKE_CASE : Optional[Any] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
24
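A returning variant of the Kahn's-algorithm routine above, sketched with readable names (the original prints its result and keeps the obfuscated `lowercase` binding); collections.deque makes the front-pop O(1) where list.pop(0) is O(n).

from collections import deque

def topological_order(graph: dict[int, list[int]]) -> list[int]:
    indegree = {vertex: 0 for vertex in graph}
    for targets in graph.values():
        for target in targets:
            indegree[target] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    order: list[int] = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    if len(order) != len(graph):
        raise ValueError("graph contains a cycle")
    return order

assert topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]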
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
1
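A self-contained illustration of the dummy-object pattern in the row above (not the library's actual implementation): the metaclass intercepts even classmethod access, so any use of the placeholder fails fast with a missing-backend message instead of a confusing NameError later.

import importlib.util

def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")

class DummyObject(type):
    # Fires for any class attribute that is not defined, e.g. from_pretrained.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class MidiProcessor(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)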
"""simple docstring""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : int = parent __snake_case : Union[str, Any] = batch_size __snake_case : Optional[Any] = num_channels __snake_case : str = image_size __snake_case : Tuple = patch_size __snake_case : str = text_seq_length __snake_case : List[Any] = is_training __snake_case : List[str] = use_input_mask __snake_case : List[str] = use_token_type_ids __snake_case : List[str] = use_labels __snake_case : Any = vocab_size __snake_case : List[str] = hidden_size __snake_case : int = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Optional[int] = type_sequence_label_size __snake_case : Optional[int] = initializer_range __snake_case : List[Any] = coordinate_size __snake_case : List[str] = shape_size __snake_case : Optional[int] = num_labels __snake_case : Dict = num_choices __snake_case : Any = scope __snake_case : List[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : int = text_seq_length __snake_case : Optional[Any] = (image_size // patch_size) ** 2 + 1 __snake_case : List[str] = self.text_seq_length + self.image_seq_length def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Dict = bbox[i, j, 3] __snake_case : List[str] = bbox[i, j, 1] __snake_case : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Any = bbox[i, j, 2] __snake_case : Optional[Any] = bbox[i, j, 0] __snake_case : List[str] = t __snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if 
self.use_input_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : Union[str, Any] = None __snake_case : List[Any] = None if self.use_labels: __snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = LayoutLMvaModel(config=a_ ) model.to(a_ ) model.eval() # text + image __snake_case : Dict = model(a_ , pixel_values=a_ ) __snake_case : Union[str, Any] = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : Union[str, Any] = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , pixel_values=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : List[str] = model(a_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Any = model(pixel_values=a_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : int = self.num_labels __snake_case : Optional[int] = LayoutLMvaForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : List[str] = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = self.num_labels __snake_case : List[Any] = LayoutLMvaForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = LayoutLMvaForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model( a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = LayoutLMvaModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=False ): '''simple docstring''' __snake_case : int = copy.deepcopy(a_ ) if model_class in get_values(a_ ): __snake_case : Any = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(a_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(a_ ): __snake_case : Tuple = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in get_values(a_ ): __snake_case : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) __snake_case : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in [ *get_values(a_ ), ]: __snake_case : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) elif model_class in [ *get_values(a_ ), ]: __snake_case : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , ) return inputs_dict def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Optional[int] = type self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = LayoutLMvaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def lowercase ( ) ->List[Any]: """simple docstring""" __snake_case : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(a_ ) __snake_case : List[Any] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : int = image_processor(images=a_ , return_tensors='''pt''' ).pixel_values.to(a_ ) __snake_case : Tuple = torch.tensor([[1, 2]] ) __snake_case : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass __snake_case : Tuple = model( input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , ) # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 1_99, 7_68) ) self.assertEqual(outputs.last_hidden_state.shape , a_ ) __snake_case : List[str] = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ) )
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
1
"""simple docstring""" import requests def lowercase ( _snake_case : str , _snake_case : str ) ->None: """simple docstring""" __snake_case : Dict = {'''Content-Type''': '''application/json'''} __snake_case : Optional[Any] = requests.post(_snake_case , json={'''text''': message_body} , headers=_snake_case ) if response.status_code != 200: __snake_case : Union[str, Any] = ( '''Request to slack returned an error ''' f"""{response.status_code}, the response is:\n{response.text}""" ) raise ValueError(_snake_case ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
24
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
        dummy_obj, model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
            dummy_obj, model, optimizer, scheduler, train_dl, valid_dl
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            True,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """An 8-bit model placed on a single device can be passed through `prepare`."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """An 8-bit model dispatched between CPU and GPU cannot be prepared."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """An 8-bit model sharded across devices cannot be prepared under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Without a distributed state, a sharded 8-bit model can still be prepared."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
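Outside the test suite, the pattern the first bnb test exercises is simply this (a minimal sketch using the same checkpoint and device map as the test above):

from accelerate import Accelerator
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}
)
model = Accelerator().prepare(model)  # works: the whole model lives on one device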
24
1
"""simple docstring""" import math def lowercase ( _snake_case : int ) ->bool: """simple docstring""" assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False __snake_case : Optional[Any] = range(3 , int(math.sqrt(_snake_case ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def lowercase ( _snake_case : Dict , _snake_case : int=1 , **_snake_case : Optional[int] ) ->Optional[Any]: """simple docstring""" __snake_case : Tuple = factor * value __snake_case : int = value while not is_prime(_snake_case ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **_snake_case ) return value
24
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Optional[Any] = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = ["""XGLMTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = ["""XGLMTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XGLMForCausalLM""", """XGLMModel""", """XGLMPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ """FlaxXGLMForCausalLM""", """FlaxXGLMModel""", """FlaxXGLMPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXGLMForCausalLM""", """TFXGLMModel""", """TFXGLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
24
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='t5' lowerCamelCase__ =['past_key_values'] lowerCamelCase__ ={'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__(self , a_=3_21_28 , a_=5_12 , a_=64 , a_=20_48 , a_=6 , a_=None , a_=8 , a_=32 , a_=1_28 , a_=0.1 , a_=1E-6 , a_=1.0 , a_="relu" , a_=True , a_=True , a_=0 , a_=1 , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : Dict = d_model __snake_case : str = d_kv __snake_case : Optional[int] = d_ff __snake_case : Optional[Any] = num_layers __snake_case : Optional[int] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : List[str] = num_heads __snake_case : Optional[Any] = relative_attention_num_buckets __snake_case : str = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : List[str] = layer_norm_epsilon __snake_case : Optional[Any] = initializer_factor __snake_case : Tuple = feed_forward_proj __snake_case : Tuple = use_cache __snake_case : str = self.feed_forward_proj.split('''-''' ) __snake_case : Optional[Any] = act_info[-1] __snake_case : Any = act_info[0] == '''gated''' if len(a_ ) > 1 and act_info[0] != "gated" or len(a_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : List[Any] = '''gelu_new''' super().__init__( pad_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , **a_ , ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __snake_case : Optional[Any] = '''past_encoder_sequence + sequence''' __snake_case : List[str] = {0: '''batch'''} __snake_case : str = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __snake_case : int = {0: '''batch''', 1: '''decoder_sequence'''} __snake_case : Tuple = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a_ , direction='''inputs''' ) return common_inputs @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 13
24
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" from __future__ import annotations def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float ) ->dict[str, float]: """simple docstring""" if (voltage, current, resistance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance < 0: raise ValueError('''Resistance cannot be negative''' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
1
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =GPTaTokenizer lowerCamelCase__ =GPTaTokenizerFast lowerCamelCase__ =True lowerCamelCase__ ={'add_prefix_space': True} lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] __snake_case : int = dict(zip(a_ , range(len(a_ ) ) ) ) __snake_case : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __snake_case : int = {'''unk_token''': '''<unk>'''} __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a_ ) ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = '''lower newer''' __snake_case : Tuple = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case : Optional[Any] = '''lower newer''' __snake_case : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __snake_case : Tuple = tokenizer.tokenize(a_ , add_prefix_space=a_ ) self.assertListEqual(a_ , a_ ) __snake_case : str = tokens + [tokenizer.unk_token] __snake_case : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if not self.test_rust_tokenizer: return __snake_case : Optional[Any] = self.get_tokenizer() __snake_case : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a_ ) __snake_case : List[Any] = '''lower newer''' # Testing tokenization __snake_case : Any = tokenizer.tokenize(a_ , add_prefix_space=a_ ) __snake_case : Optional[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) # Testing conversion to ids without special tokens __snake_case : Dict = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ ) __snake_case : int = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # Testing conversion to ids with special tokens __snake_case : Optional[Any] = 
self.get_rust_tokenizer(add_prefix_space=a_ ) __snake_case : Dict = tokenizer.encode(a_ , add_prefix_space=a_ ) __snake_case : List[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) # Testing the unknown token __snake_case : Dict = tokens + [rust_tokenizer.unk_token] __snake_case : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self , a_=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ ) # Simple input __snake_case : Optional[Any] = '''This is a simple input''' __snake_case : int = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case : List[Any] = ('''This is a simple input''', '''This is a pair''') __snake_case : List[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='''max_length''' ) # Simple input self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='''max_length''' ) # Simple input self.assertRaises( a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='''max_length''' , ) # Pair input self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='''max_length''' ) # Pair input self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='''max_length''' ) # Pair input self.assertRaises( a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input __snake_case : Dict = '''This is a simple input''' __snake_case : List[Any] = ['''This is a simple input looooooooong''', '''This is a simple input'''] __snake_case : Optional[Any] = ('''This is a simple input''', '''This is a pair''') __snake_case : List[str] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] __snake_case : Optional[int] = tokenizer.pad_token_id __snake_case : Tuple = tokenizer(a_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) __snake_case : Union[str, Any] = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='''np''' ) __snake_case : List[Any] = tokenizer(*a_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) __snake_case : Dict = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding 
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = '''$$$''' __snake_case : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ ) __snake_case : Any = '''This is a simple input''' __snake_case : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case : List[Any] = tokenizer.bos_token_id __snake_case : Optional[int] = tokenizer(a_ ) __snake_case : List[Any] = tokenizer(a_ ) self.assertEqual(out_s.input_ids[0] , a_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __snake_case : Optional[int] = tokenizer.decode(out_s.input_ids ) __snake_case : Optional[int] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = [self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case : str = '''Encode this.''' __snake_case : Optional[int] = '''This one too please.''' __snake_case : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ ) encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ ) __snake_case : int = tokenizer.encode_plus( a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , ) __snake_case : List[str] = encoded_sequence_dict['''input_ids'''] __snake_case : Optional[Any] = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(a_ ) , len(a_ ) ) __snake_case : Optional[int] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ ) ] __snake_case : List[str] = [x for x in filtered_sequence if x is not None] self.assertEqual(a_ , a_ ) @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a_ ) __snake_case : List[Any] = '''A photo of a cat''' __snake_case : Union[str, Any] = tokenizer.encode( a_ , ) self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''test_opt''' ) __snake_case : Tuple = AutoTokenizer.from_pretrained('''./test_opt''' ) __snake_case : int = tokenizer.encode( a_ , ) self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=a_ ) __snake_case : int = '''A photo of a cat''' __snake_case : Tuple = tokenizer.encode( a_ , ) # Same as above self.assertEqual(a_ , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = 
AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a_ ) __snake_case : Any = '''bos''' __snake_case : List[Any] = tokenizer.get_vocab()['''bos'''] __snake_case : Any = '''A photo of a cat''' __snake_case : Tuple = tokenizer.encode( a_ , ) # We changed the bos token self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''./tok''' ) __snake_case : Tuple = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) __snake_case : List[Any] = tokenizer.encode( a_ , ) self.assertEqual(a_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
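Outside the harness, the padding behaviour these tests pin down takes only a few lines (stock "gpt2" checkpoint, which is not part of the test fixtures):

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
out = tokenizer("This is a simple input", padding="max_length", max_length=30)
assert len(out["input_ids"]) == 30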
24
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
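Stripped of the unittest scaffolding, one benchmark run reduces to the following (same tiny checkpoint the tests use; these benchmark utilities are deprecated in recent transformers releases):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()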
24
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
24
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
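Typical use of the `FileLock` alias the module exports (file names illustrative):

lock = FileLock("high_ground.txt.lock")
with lock:  # blocks until the lock is acquired; released on exit, even on error
    with open("high_ground.txt", "a") as f:
        f.write("appended while holding the lock\n")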
24
1
"""simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowercase ( _snake_case : str ) ->int: """simple docstring""" __snake_case : Optional[int] = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} __snake_case : Optional[Any] = 0 __snake_case : List[Any] = 0 while place < len(_snake_case ): if (place + 1 < len(_snake_case )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowercase ( _snake_case : int ) ->str: """simple docstring""" __snake_case : Union[str, Any] = [] for arabic, roman in ROMAN: ((__snake_case) , (__snake_case)) : List[str] = divmod(_snake_case , _snake_case ) result.append(roman * factor ) if number == 0: break return "".join(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
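The integration test above corresponds to this minimal forward pass (same checkpoint and toy inputs, run on CPU):

import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
assert outputs.last_hidden_state.shape == torch.Size([1, 2, 768])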
24
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) torch.set_grad_enabled(False) SCREAMING_SNAKE_CASE : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" def lowercase ( _snake_case : str , _snake_case : str=100 , _snake_case : List[str]=" " ) ->List[str]: """simple docstring""" __snake_case : Dict = text.split(_snake_case ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_snake_case ) , _snake_case )] def lowercase ( _snake_case : dict ) ->dict: """simple docstring""" __snake_case , __snake_case : Optional[Any] = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(_snake_case ): titles.append(title if title is not None else '''''' ) texts.append(_snake_case ) return {"title": titles, "text": texts} def lowercase ( _snake_case : dict , _snake_case : DPRContextEncoder , _snake_case : DPRContextEncoderTokenizerFast ) ->dict: """simple docstring""" __snake_case : Tuple = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=_snake_case , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] __snake_case : Tuple = ctx_encoder(input_ids.to(device=_snake_case ) , return_dict=_snake_case ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowercase ( _snake_case : "RagExampleArguments" , _snake_case : "ProcessingArguments" , _snake_case : "IndexHnswArguments" , ) ->int: """simple docstring""" logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __snake_case : Dict = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __snake_case : List[Any] = dataset.map(_snake_case , batched=_snake_case , num_proc=processing_args.num_proc ) # And compute the embeddings __snake_case : Dict = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_snake_case ) __snake_case : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __snake_case : int = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space __snake_case : int = dataset.map( partial(_snake_case , ctx_encoder=_snake_case , ctx_tokenizer=_snake_case ) , batched=_snake_case , 
batch_size=processing_args.batch_size , features=_snake_case , ) # And finally save your dataset __snake_case : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(_snake_case ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __snake_case : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=_snake_case ) # And save the index __snake_case : str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(_snake_case ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=str(Path(__snake_case ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ), metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''}, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'}, ) lowerCamelCase__ =field( default='facebook/rag-sequence-nq', metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''}, ) lowerCamelCase__ =field( default='facebook/dpr-ctx_encoder-multiset-base', metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) }, ) lowerCamelCase__ =field( default=str(Path(__snake_case ).parent / 'test_run' / 'dummy-kb' ), metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'}, ) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=__snake_case, metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' }, ) lowerCamelCase__ =field( default=16, metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' }, ) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=768, metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'}, ) lowerCamelCase__ =field( default=128, metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) }, ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) SCREAMING_SNAKE_CASE : str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : Optional[int] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
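
# A minimal sketch of querying the artifacts this script writes, assuming it has
# run end to end. The output path and the DPR question-encoder checkpoint below
# are illustrative stand-ins for whatever was actually configured.
import os

import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

output_dir = "test_run/dummy-kb"  # hypothetical: mirror the script's output_dir
dataset = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))
dataset.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))

# encode the question with the DPR *question* encoder that pairs with the context encoder
q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
with torch.no_grad():
    question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output

# inner-product search over the HNSW index built above
scores, passages = dataset.get_nearest_examples("embeddings", question_emb[0].numpy(), k=5)
print(list(zip(scores, passages["title"])))
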
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
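
# Outside the test harness, the JIT-trace path exercised by the GPU test above
# reduces to a few lines; a hedged sketch on CPU with the public
# distilbert-base-uncased checkpoint (any DistilBERT checkpoint should do).
import torch
from transformers import DistilBertModel, DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# torchscript=True makes the model return tuples, which torch.jit.trace requires
model = DistilBertModel.from_pretrained("distilbert-base-uncased", torchscript=True).eval()

inputs = tokenizer("Hello, world!", return_tensors="pt")
traced = torch.jit.trace(model, (inputs["input_ids"], inputs["attention_mask"]))
last_hidden_state = traced(inputs["input_ids"], inputs["attention_mask"])[0]
print(last_hidden_state.shape)  # torch.Size([1, 6, 768]) for this input
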
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowercase ( _snake_case : Dataset , _snake_case : Dict[str, str] ) ->List[Any]: """simple docstring""" __snake_case : int = args.log_outputs __snake_case : Union[str, Any] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric __snake_case : Union[str, Any] = load_metric('''wer''' ) __snake_case : Tuple = load_metric('''cer''' ) # compute metrics __snake_case : Union[str, Any] = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) __snake_case : List[Any] = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results __snake_case : Union[str, Any] = f"""WER: {wer_result}\nCER: {cer_result}""" print(_snake_case ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(_snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case : List[Any] = f"""log_{dataset_id}_predictions.txt""" __snake_case : List[Any] = f"""log_{dataset_id}_targets.txt""" with open(_snake_case , '''w''' ) as p, open(_snake_case , '''w''' ) as t: # mapping function to write output def write_to_file(_snake_case : str , _snake_case : Optional[int] ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(_snake_case , with_indices=_snake_case ) def lowercase ( _snake_case : str ) ->str: """simple docstring""" __snake_case : Optional[Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case : Optional[Any] = re.sub(_snake_case , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
__snake_case : Optional[Any] = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: __snake_case : int = ''' '''.join(text.split(_snake_case ) ) return text def lowercase ( _snake_case : Optional[Any] ) ->Tuple: """simple docstring""" __snake_case : Optional[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case : str = feature_extractor.sampling_rate # resample audio __snake_case : Tuple = dataset.cast_column('''audio''' , Audio(sampling_rate=_snake_case ) ) # load eval pipeline if args.device is None: __snake_case : Dict = 0 if torch.cuda.is_available() else -1 __snake_case : Optional[int] = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_snake_case : List[Any] ): __snake_case : Tuple = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case : Union[str, Any] = prediction['''text'''] __snake_case : Any = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples __snake_case : Optional[int] = dataset.map(_snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_snake_case , _snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) SCREAMING_SNAKE_CASE : int = parser.parse_args() main(args)
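
# The evaluation above boils down to the ASR pipeline plus text normalization;
# a minimal sketch of the same prediction path on a single example. The model
# id stands in for whatever --model_id would be passed, and the tiny dummy
# dataset is used only so the snippet runs without authentication.
from datasets import load_dataset
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]
prediction = asr(sample["audio"]["array"], chunk_length_s=5.0)
print(prediction["text"])
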
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
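
# A hedged sanity check of the exported checkpoint beyond the integration test
# above: run the masked-LM head through a pipeline. The dump path is whatever
# was passed as --pytorch_dump_path; the conversion writes no tokenizer, so a
# standard BERT vocabulary is assumed here.
from transformers import BertTokenizer, pipeline

unmasker = pipeline(
    "fill-mask",
    model="path/to/pytorch_dump",  # hypothetical placeholder
    tokenizer=BertTokenizer.from_pretrained("bert-base-uncased"),
)
print(unmasker("Paris is the [MASK] of France.")[0]["token_str"])
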
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =BartphoTokenizer lowerCamelCase__ =False lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() __snake_case : Tuple = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] __snake_case : Tuple = dict(zip(a_ , range(len(a_ ) ) ) ) __snake_case : Optional[Any] = {'''unk_token''': '''<unk>'''} __snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __snake_case : Optional[Any] = BartphoTokenizer(a_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = '''This is a là test''' __snake_case : Optional[Any] = '''This is a<unk><unk> test''' return input_text, output_text def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = BartphoTokenizer(a_ , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case : Optional[Any] = '''This is a là test''' __snake_case : int = '''▁This ▁is ▁a ▁l à ▁t est'''.split() __snake_case : Dict = tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) __snake_case : List[Any] = tokens + [tokenizer.unk_token] __snake_case : Any = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowercase ( ) ->None: """simple docstring""" print('''Making key files...''' ) make_key_files('''rsa''' , 1_024 ) print('''Key files generation successful.''' ) def lowercase ( _snake_case : int ) ->tuple[tuple[int, int], tuple[int, int]]: """simple docstring""" print('''Generating prime p...''' ) __snake_case : Dict = rabinMiller.generate_large_prime(_snake_case ) print('''Generating prime q...''' ) __snake_case : Optional[int] = rabinMiller.generate_large_prime(_snake_case ) __snake_case : Dict = p * q print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' ) while True: __snake_case : Optional[int] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_snake_case , (p - 1) * (q - 1) ) == 1: break print('''Calculating d that is mod inverse of e...''' ) __snake_case : Any = cryptoMath.find_mod_inverse(_snake_case , (p - 1) * (q - 1) ) __snake_case : Optional[Any] = (n, e) __snake_case : Any = (n, d) return (public_key, private_key) def lowercase ( _snake_case : str , _snake_case : int ) ->None: """simple docstring""" if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('''\nWARNING:''' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() __snake_case , __snake_case : Union[str, Any] = generate_key(_snake_case ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['onnx'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''onnx'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''onnx'''] )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import qiskit def lowercase ( _snake_case : int , _snake_case : int ) ->qiskit.result.counts.Counts: """simple docstring""" __snake_case : Any = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register __snake_case : List[Any] = qiskit.QuantumCircuit(_snake_case , _snake_case ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator __snake_case : Optional[int] = qiskit.execute(_snake_case , _snake_case , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = single_qubit_measure(2, 2) print(F'Total count for various states are: {counts}')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
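For reference, a minimal usage sketch of the fast tokenizer defined above. The readable class name is an assumption (the listing renames it to _UpperCAmelCase); it appears to correspond to transformers.MBartTokenizerFast, and the checkpoint name comes from the pretrained maps in the listing:

# Hypothetical usage sketch, not part of the listing above.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
# Per set_src_lang_special_tokens above, source text gets no prefix tokens and an
# [eos, language-code] suffix, i.e. input_ids end with [..., </s>, en_XX].
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")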
24
1
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) def lowercase ( _snake_case : bool , _snake_case : bool ) ->Optional[int]: """simple docstring""" def run_func(_snake_case : Any ): @wraps(_snake_case ) def run_in_eager_mode(*_snake_case : Union[str, Any] , **_snake_case : Optional[Any] ): return func(*_snake_case , **_snake_case ) @wraps(_snake_case ) @tf.function(experimental_compile=_snake_case ) def run_in_graph_mode(*_snake_case : Optional[int] , **_snake_case : List[str] ): return func(*_snake_case , **_snake_case ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int ) ->["tf.Tensor"]: """simple docstring""" __snake_case : Any = random.Random() __snake_case : Optional[Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(_snake_case , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ ="TensorFlow" @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return tf.__version__ def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case : List[str] = self._prepare_inference_func(a_ , a_ , a_ ) return self._measure_speed(_inference ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case : Optional[int] = self._prepare_train_func(a_ , a_ , a_ ) return self._measure_speed(_train ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_ ) __snake_case : List[str] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case : Tuple = self._prepare_inference_func(a_ , a_ , a_ ) return self._measure_memory(_inference ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_ ) __snake_case : str = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case : str = 
self._prepare_train_func(a_ , a_ , a_ ) return self._measure_memory(_train ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : int = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) __snake_case : Dict = ( hasattr(a_ , '''architectures''' ) and isinstance(config.architectures , a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __snake_case : List[str] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model __snake_case : str = __import__('''transformers''' , fromlist=[model_class] ) __snake_case : Optional[Any] = getattr(a_ , a_ ) __snake_case : Dict = model_cls(a_ ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: __snake_case : Optional[Any] = TF_MODEL_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently __snake_case : Any = config.vocab_size if hasattr(a_ , '''vocab_size''' ) else config.encoder.vocab_size __snake_case : Union[str, Any] = random_input_ids(a_ , a_ , a_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(a_ , decoder_input_ids=a_ , training=a_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(a_ , training=a_ ) __snake_case : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) __snake_case : Optional[Any] = ( hasattr(a_ , '''architectures''' ) and isinstance(config.architectures , a_ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __snake_case : Dict = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model __snake_case : Any = __import__('''transformers''' , fromlist=[model_class] ) __snake_case : List[str] = getattr(a_ , a_ ) __snake_case : int = model_cls(a_ ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: __snake_case : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ ) # encoder-decoder has vocab size saved differently __snake_case : Optional[Any] = config.vocab_size if hasattr(a_ , '''vocab_size''' ) else config.encoder.vocab_size __snake_case : Union[str, Any] = random_input_ids(a_ , a_ , a_ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): __snake_case : str = model(a_ , decoder_input_ids=a_ , labels=a_ , training=a_ )[0] __snake_case : Any = tf.gradients(a_ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): __snake_case : Union[str, Any] = model(a_ , labels=a_ , training=a_ )[0] __snake_case : List[Any] = tf.gradients(a_ , model.trainable_variables ) return gradients __snake_case : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(a_ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average __snake_case : Union[str, Any] = timeit.repeat( a_ , repeat=self.args.repeat , number=10 , ) return min(a_ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) __snake_case : List[Any] = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) __snake_case : Any = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() __snake_case : int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) __snake_case : Any = nvml.nvmlDeviceGetMemoryInfo(a_ ) __snake_case : List[str] = meminfo.used __snake_case : Any = Memory(a_ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) __snake_case : int = None else: __snake_case : Optional[Any] = measure_peak_memory_cpu(a_ ) __snake_case : str = Memory(a_ ) if isinstance(a_ , a_ ) else memory_bytes if self.args.trace_memory_line_by_line: __snake_case : str = stop_memory_tracing(a_ ) if memory is None: __snake_case : str = summary.total else: __snake_case : List[str] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
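A sketch of how this benchmark class is typically driven, assuming it corresponds to transformers' TensorFlowBenchmark paired with the TensorFlowBenchmarkArguments imported above; the model name and sizes are illustrative:

# Hypothetical usage sketch; requires TensorFlow and a reachable checkpoint.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],  # illustrative checkpoint
    batch_sizes=[8],
    sequence_lengths=[32],
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # dispatches to the _inference/_train closures built above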
24
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
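A usage sketch for the PyTorch dataset above. The readable names (HansDataset, data_dir, tokenizer, task, ...) follow the original transformers HANS example, which the obfuscation above collapses to a_; treat them as assumptions:

# Hypothetical usage sketch; expects heuristics_train_set.txt under data_dir.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
train_dataset = HansDataset(
    data_dir="./hans",
    tokenizer=tokenizer,
    task="hans",          # selects HansProcessor via the hans_processors map above
    max_seq_length=128,
    evaluate=False,       # True would load heuristics_evaluation_set.txt instead
)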
24
1
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent SCREAMING_SNAKE_CASE : List[str] = {"""UserAgent""": UserAgent().random} def lowercase ( _snake_case : Dict ) ->dict: """simple docstring""" __snake_case : int = script.contents[0] __snake_case : Any = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : List[Any] = f"""https://www.instagram.com/{username}/""" __snake_case : Dict = self.get_json() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = requests.get(self.url , headers=a_ ).text __snake_case : str = BeautifulSoup(a_ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): '''simple docstring''' return f"""{self.__class__.__name__}('{self.username}')""" def __str__(self ): '''simple docstring''' return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["username"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["is_private"] def lowercase ( _snake_case : str = "github" ) ->None: """simple docstring""" import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __snake_case : Dict = InstagramUser(_snake_case ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _snake_case ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Optional[int] = InstagramUser("""github""") print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
24
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
1
"""simple docstring""" def lowercase ( _snake_case : list , _snake_case : int = 0 ) ->list: """simple docstring""" __snake_case : Optional[int] = length or len(_snake_case ) __snake_case : List[str] = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: __snake_case , __snake_case : str = list_data[i + 1], list_data[i] __snake_case : Dict = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
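Once the conversion script has run, the dump folder can be reloaded with the regular transformers classes; a hypothetical sketch for the fine-tuned (CTC) path, which is the only branch above that saves a processor alongside the model:

# Hypothetical sketch; "./wav2vec2-converted" stands in for --pytorch_dump_folder_path.
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-converted")
processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-converted")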
24
1
"""simple docstring""" import pprint import requests SCREAMING_SNAKE_CASE : Any = """https://zenquotes.io/api""" def lowercase ( ) ->list: """simple docstring""" return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def lowercase ( ) ->list: """simple docstring""" return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[str] = random_quotes() pprint.pprint(response)
24
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
1
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def lowercase ( _snake_case : List[Any] ) ->Optional[Any]: """simple docstring""" __snake_case : List[str] = checkpoints.load_tax_checkpoint(_snake_case ) __snake_case : Optional[int] = flatten_dict(_snake_case ) return flax_params def lowercase ( _snake_case : Any ) ->Optional[int]: """simple docstring""" __snake_case : str = {} __snake_case : Dict = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } __snake_case : List[Any] = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __snake_case : Tuple = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __snake_case : int = new_key.replace(_snake_case , _snake_case ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __snake_case : Union[str, Any] = new_key.replace(_snake_case , _snake_case ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __snake_case : Dict = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _snake_case ) __snake_case : str = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __snake_case : Any = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _snake_case ) __snake_case : List[str] = flax_dict[key] __snake_case : int = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __snake_case : Any = torch.from_numpy(converted_dict[key].T ) else: __snake_case : int = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def lowercase ( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : str=False , _snake_case : str=False ) ->List[Any]: """simple docstring""" __snake_case : Optional[Any] = get_flax_param(_snake_case ) if not use_large: __snake_case : Union[str, Any] = PixaStructVisionConfig() __snake_case : Optional[int] = PixaStructTextConfig() else: __snake_case : List[str] = PixaStructVisionConfig( hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 ) __snake_case 
: int = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 ) __snake_case : Tuple = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_snake_case ) __snake_case : Any = PixaStructForConditionalGeneration(_snake_case ) __snake_case : int = rename_and_convert_flax_params(_snake_case ) model.load_state_dict(_snake_case ) __snake_case : Optional[Any] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) __snake_case : Tuple = PixaStructImageProcessor() __snake_case : Optional[int] = PixaStructProcessor(image_processor=_snake_case , tokenizer=_snake_case ) if use_large: __snake_case : Optional[Any] = 4_096 __snake_case : Dict = True # mkdir if needed os.makedirs(_snake_case , exist_ok=_snake_case ) model.save_pretrained(_snake_case ) processor.save_pretrained(_snake_case ) print('''Model saved in {}'''.format(_snake_case ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""") parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""") SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
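The saved folder can then be reloaded with the matching transformers classes, since the script calls save_pretrained on both the model and the processor; a hypothetical sketch (the path stands in for --pytorch_dump_folder_path):

# Hypothetical sketch of reloading the converted checkpoint.
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

model = Pix2StructForConditionalGeneration.from_pretrained("./pix2struct-converted")
processor = Pix2StructProcessor.from_pretrained("./pix2struct-converted")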
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
1
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowercase ( _snake_case : str , _snake_case : Tuple , _snake_case : List[Any] ) ->Dict: """simple docstring""" __snake_case : Optional[Any] = AutoConfig.from_pretrained(_snake_case ) __snake_case : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_snake_case ) __snake_case : Dict = checkpoints.load_tax_checkpoint(_snake_case ) __snake_case : str = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": __snake_case : Any = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": __snake_case : Any = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : List[Any] = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): __snake_case : Optional[Any] = f"""layers_{str(_snake_case )}""" # Self-Attention __snake_case : str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] __snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] __snake_case : int = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] __snake_case : Any = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization __snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: __snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __snake_case : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __snake_case : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __snake_case : str = flax_model.params['''encoder''']['''block'''][str(_snake_case )]['''layer'''] __snake_case : Optional[int] = tax_attention_key __snake_case : Any = tax_attention_out __snake_case : Union[str, Any] = tax_attention_query __snake_case : str = tax_attention_value __snake_case : List[str] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : str = tax_global_layer_norm if split_mlp_wi: __snake_case : Dict = tax_mlp_wi_a __snake_case : List[str] = tax_mlp_wi_a else: __snake_case : Optional[Any] = tax_mlp_wi __snake_case : Any = tax_mlp_wo __snake_case : Optional[int] = tax_mlp_layer_norm __snake_case : Any = flax_model_encoder_layer_block # Only for layer 0: 
__snake_case : Optional[int] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T __snake_case : Dict = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Union[str, Any] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T __snake_case : Dict = tax_encoder_global_rel_embedding # Assigning __snake_case : Optional[int] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] __snake_case : Dict = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): __snake_case : Any = f"""layers_{str(_snake_case )}""" # Self-Attention __snake_case : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] __snake_case : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] __snake_case : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] __snake_case : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization __snake_case : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention __snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] __snake_case : Tuple = tax_enc_dec_attention_module['''key''']['''kernel'''] __snake_case : Tuple = tax_enc_dec_attention_module['''out''']['''kernel'''] __snake_case : Tuple = tax_enc_dec_attention_module['''query''']['''kernel'''] __snake_case : List[str] = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization __snake_case : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: __snake_case : str = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __snake_case : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __snake_case : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __snake_case : int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __snake_case : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __snake_case : int = flax_model.params['''decoder''']['''block'''][str(_snake_case )]['''layer'''] __snake_case : List[str] = tax_attention_key __snake_case : Optional[int] = tax_attention_out __snake_case : str = tax_attention_query __snake_case : Union[str, Any] = tax_attention_value __snake_case : Dict = tax_pre_attention_layer_norm __snake_case : List[Any] = tax_enc_dec_attention_key __snake_case : Any = tax_enc_dec_attention_out __snake_case : Union[str, Any] = tax_enc_dec_attention_query __snake_case : Dict = tax_enc_dec_attention_value __snake_case : str = tax_cross_layer_norm if split_mlp_wi: __snake_case : Optional[Any] = tax_mlp_wi_a __snake_case : int = tax_mlp_wi_a else: __snake_case : Any = tax_mlp_wi __snake_case : List[str] = tax_mlp_wo __snake_case : str = txa_mlp_layer_norm __snake_case : Dict = flax_model_decoder_layer_block # Decoder Normalization __snake_case : List[Any] = 
tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] __snake_case : Any = txa_decoder_norm # Only for layer 0: __snake_case : List[str] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T __snake_case : Optional[int] = tax_decoder_rel_embedding # Token Embeddings __snake_case : str = tax_model['''target''']['''token_embedder''']['''embedding'''] __snake_case : List[str] = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: __snake_case : int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(_snake_case ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint.""" ) parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""") parser.add_argument( """--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model.""" ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
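After conversion, the Flax dump can be reloaded through the same auto class the script uses internally; a hypothetical sketch (the folder name stands in for --flax_dump_folder_path):

# Hypothetical sketch of reloading the converted Flax checkpoint.
from transformers import FlaxAutoModelForSeq2SeqLM

model = FlaxAutoModelForSeq2SeqLM.from_pretrained("./longt5-flax-dump")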
24
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
24
1
"""simple docstring""" import socket def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) __snake_case : Union[str, Any] = socket.gethostname() __snake_case : int = 12_312 sock.connect((host, port) ) sock.send(b'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: __snake_case : Tuple = sock.recv(1_024 ) if not data: break out_file.write(_snake_case ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
24
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
1
"""simple docstring""" from math import loga def lowercase ( _snake_case : int ) ->int: """simple docstring""" if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_snake_case , _snake_case ): raise TypeError('''Input value must be a \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : List[Any] = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""", """UniSpeechForCTC""", """UniSpeechForPreTraining""", """UniSpeechForSequenceClassification""", """UniSpeechModel""", """UniSpeechPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
1
"""simple docstring""" def lowercase ( _snake_case : Optional[int]=28_123 ) ->Dict: """simple docstring""" __snake_case : int = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i __snake_case : Tuple = set() __snake_case : Optional[int] = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(_snake_case ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
24
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
1
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
24
1
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
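The platform-specific class selected at the bottom is meant to be used as a context manager, which is what the acquire/release pair and __enter__/__exit__ above implement; a minimal usage sketch (lock and target paths are placeholders):

lock = FileLock("resource.txt.lock", timeout=5)
with lock:  # blocks for up to 5 seconds, then raises Timeout
    with open("resource.txt", "a") as f:
        f.write("exclusive write\n")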
24
1
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate SCREAMING_SNAKE_CASE : List[Any] = trt.Logger(trt.Logger.WARNING) SCREAMING_SNAKE_CASE : Optional[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__) SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() if args.tokenizer_name: SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) SCREAMING_SNAKE_CASE : int = args.per_device_eval_batch_size SCREAMING_SNAKE_CASE : Optional[int] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[int] = """temp_engine/bert-fp32.engine""" if args.fpaa: SCREAMING_SNAKE_CASE : str = """temp_engine/bert-fp16.engine""" if args.inta: SCREAMING_SNAKE_CASE : List[str] = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") SCREAMING_SNAKE_CASE : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network SCREAMING_SNAKE_CASE : Any = [network.get_input(i) for i in range(network.num_inputs)] SCREAMING_SNAKE_CASE : Tuple = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: SCREAMING_SNAKE_CASE : int = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) SCREAMING_SNAKE_CASE : str = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) SCREAMING_SNAKE_CASE : str = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def lowercase ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : List[Any] ) ->Any: 
"""simple docstring""" __snake_case : Optional[int] = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) __snake_case : Optional[int] = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) __snake_case : Tuple = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _snake_case ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _snake_case ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _snake_case ) # start time __snake_case : int = time.time() # Run inference context.execute_async( bindings=[int(_snake_case ) for d_inp in d_inputs] + [int(_snake_case ), int(_snake_case )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_snake_case , _snake_case , _snake_case ) cuda.memcpy_dtoh_async(_snake_case , _snake_case , _snake_case ) # Synchronize the stream and take time stream.synchronize() # end time __snake_case : List[str] = time.time() __snake_case : str = end_time - start_time __snake_case : List[Any] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. SCREAMING_SNAKE_CASE : Dict = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. SCREAMING_SNAKE_CASE : int = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. SCREAMING_SNAKE_CASE : Union[str, Any] = raw_datasets["""validation"""].column_names SCREAMING_SNAKE_CASE : str = """question""" if """question""" in column_names else column_names[0] SCREAMING_SNAKE_CASE : List[Any] = """context""" if """context""" in column_names else column_names[1] SCREAMING_SNAKE_CASE : int = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the ' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) SCREAMING_SNAKE_CASE : List[str] = min(args.max_seq_length, tokenizer.model_max_length) def lowercase ( _snake_case : List[str] ) ->Tuple: """simple docstring""" __snake_case : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possibly giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __snake_case : Union[str, Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=_snake_case , stride=args.doc_stride , return_overflowing_tokens=_snake_case , return_offsets_mapping=_snake_case , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __snake_case : Optional[Any] = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __snake_case : Dict = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __snake_case : Tuple = tokenized_examples.sequence_ids(_snake_case ) __snake_case : int = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __snake_case : List[str] = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not.
__snake_case : Optional[Any] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples SCREAMING_SNAKE_CASE : int = raw_datasets["""validation"""] # Validation Feature Creation SCREAMING_SNAKE_CASE : str = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) SCREAMING_SNAKE_CASE : Tuple = default_data_collator SCREAMING_SNAKE_CASE : Tuple = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowercase ( _snake_case : List[Any] , _snake_case : Dict , _snake_case : Tuple , _snake_case : Tuple="eval" ) ->Any: """simple docstring""" __snake_case : Optional[Any] = postprocess_qa_predictions( examples=_snake_case , features=_snake_case , predictions=_snake_case , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_snake_case , ) # Format the result to the format the metric expects. if args.version_2_with_negative: __snake_case : Tuple = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: __snake_case : str = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] __snake_case : List[Any] = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_snake_case , label_ids=_snake_case ) SCREAMING_SNAKE_CASE : int = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inference for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowercase ( _snake_case : str ) ->Dict: """simple docstring""" return trt.volume(engine.get_binding_shape(_snake_case ) ) * engine.get_binding_dtype(_snake_case ).itemsize # Allocate device memory for inputs and outputs. SCREAMING_SNAKE_CASE : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer SCREAMING_SNAKE_CASE : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) SCREAMING_SNAKE_CASE : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) SCREAMING_SNAKE_CASE : Any = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference.
SCREAMING_SNAKE_CASE : Optional[int] = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') SCREAMING_SNAKE_CASE : Tuple = 0.0 SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = timeit.default_timer() SCREAMING_SNAKE_CASE : Union[str, Any] = None for step, batch in enumerate(eval_dataloader): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = outputs SCREAMING_SNAKE_CASE : Tuple = torch.tensor(start_logits) SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) SCREAMING_SNAKE_CASE : Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) SCREAMING_SNAKE_CASE : Optional[int] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) SCREAMING_SNAKE_CASE : List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: SCREAMING_SNAKE_CASE : Optional[int] = nested_truncate(all_preds, len(eval_dataset)) SCREAMING_SNAKE_CASE : Union[str, Any] = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000)) logger.info("""Total Number of Inference = %d""", niter) SCREAMING_SNAKE_CASE : Tuple = post_processing_function(eval_examples, eval_dataset, all_preds) SCREAMING_SNAKE_CASE : Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
24
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertEqual(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
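# Editor's note: a minimal, hedged sketch of the bbox "legalization" step in the
# tester above. The nested loops swap coordinates so that x0 <= x1 and y0 <= y1
# for every box, which LiLT's 2D position embeddings require. The vectorized
# form below is an illustration only, not the library's API.
import torch

bbox = torch.tensor([[[5, 9, 2, 3], [1, 2, 3, 4]]])  # (batch, seq, 4)
xa, ya, xb, yb = bbox.unbind(-1)
legal = torch.stack(
    [torch.minimum(xa, xb), torch.minimum(ya, yb), torch.maximum(xa, xb), torch.maximum(ya, yb)],
    dim=-1,
)
print(legal.tolist())  # [[[2, 3, 5, 9], [1, 2, 3, 4]]]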
24
1
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class _UpperCAmelCase ( nn.Module ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =jnp.floataa def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case , __snake_case : int = hidden_states.shape __snake_case : int = jax.image.resize( a_ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) __snake_case : List[Any] = self.conv(a_ ) return hidden_states class _UpperCAmelCase ( nn.Module ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =jnp.floataa def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self , a_ ): '''simple docstring''' __snake_case : List[str] = self.conv(a_ ) return hidden_states class _UpperCAmelCase ( nn.Module ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =0.0 lowerCamelCase__ =None lowerCamelCase__ =jnp.floataa def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.in_channels if self.out_channels is None else self.out_channels __snake_case : Any = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __snake_case : Any = nn.Conv( a_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __snake_case : List[str] = nn.Dense(a_ , dtype=self.dtype ) __snake_case : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __snake_case : List[str] = nn.Dropout(self.dropout_prob ) __snake_case : Union[str, Any] = nn.Conv( a_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __snake_case : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __snake_case : Any = None if use_nin_shortcut: __snake_case : Optional[Any] = nn.Conv( a_ , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__(self , a_ , a_ , a_=True ): '''simple docstring''' __snake_case : Dict = hidden_states __snake_case : int = self.norma(a_ ) __snake_case : Tuple = nn.swish(a_ ) __snake_case : Dict = self.conva(a_ ) __snake_case : Union[str, Any] = self.time_emb_proj(nn.swish(a_ ) ) __snake_case : Dict = jnp.expand_dims(jnp.expand_dims(a_ , 1 ) , 1 ) __snake_case : List[Any] = hidden_states + temb __snake_case : int = self.norma(a_ ) __snake_case : Dict = nn.swish(a_ ) __snake_case : int = self.dropout(a_ , a_ ) __snake_case : Tuple = self.conva(a_ ) if self.conv_shortcut is not None: __snake_case : List[Any] = self.conv_shortcut(a_ ) return hidden_states + residual
24
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
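# Editor's note: the torchscript test above traces the model, saves the trace,
# reloads it, and calls it again. A hedged sketch of the same round-trip on a
# toy module (the module and file name are made up for illustration):
import os
import tempfile

import torch


class _Toy(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        return input_ids * attention_mask


example = (torch.ones(1, 3, dtype=torch.long), torch.ones(1, 3, dtype=torch.long))
traced = torch.jit.trace(_Toy(), example)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)
print(loaded(example[0], torch.zeros(1, 3, dtype=torch.long)))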
24
1
"""simple docstring""" def lowercase ( _snake_case : str , _snake_case : str ) ->bool: """simple docstring""" __snake_case : Optional[int] = len(_snake_case ) + 1 __snake_case : Optional[int] = len(_snake_case ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. __snake_case : Tuple = [[0 for i in range(_snake_case )] for j in range(_snake_case )] # since string of zero length match pattern of zero length __snake_case : Tuple = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _snake_case ): __snake_case : List[Any] = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _snake_case ): __snake_case : Optional[int] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _snake_case ): for j in range(1 , _snake_case ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": __snake_case : List[str] = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: __snake_case : Union[str, Any] = 1 elif pattern[j - 2] in (input_string[i - 1], "."): __snake_case : str = dp[i - 1][j] else: __snake_case : str = 0 else: __snake_case : Tuple = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") SCREAMING_SNAKE_CASE : Any = """aab""" SCREAMING_SNAKE_CASE : str = """c*a*b""" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(F'{input_string} matches the given pattern {pattern}') else: print(F'{input_string} does not match with the given pattern {pattern}')
24
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done successfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
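# Editor's note: a hedged aside on the `array.transpose()` calls above. TF
# stores dense kernels as (in_features, out_features) while
# torch.nn.Linear.weight is (out_features, in_features), so every "kernel"
# tensor is transposed on load. Illustration with made-up sizes:
import numpy as np
import torch

tf_kernel = np.zeros((768, 3072), dtype=np.float32)  # TF layout: (in, out)
pt_weight = torch.from_numpy(tf_kernel.transpose())  # PyTorch layout: (out, in)
print(pt_weight.shape)  # torch.Size([3072, 768])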
24
1
"""simple docstring""" import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , a_=False , ): '''simple docstring''' __snake_case : str = size if size is not None else {'''height''': 20, '''width''': 20} __snake_case : Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : str = num_channels __snake_case : List[Any] = image_size __snake_case : List[str] = min_resolution __snake_case : Optional[Any] = max_resolution __snake_case : Dict = do_resize __snake_case : List[str] = size __snake_case : Dict = do_center_crop __snake_case : int = crop_size __snake_case : int = do_normalize __snake_case : Dict = image_mean __snake_case : List[str] = image_std __snake_case : List[str] = do_reduce_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowercase ( ) ->Any: """simple docstring""" __snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __snake_case : Optional[Any] = Image.open(dataset[0]['''file'''] ) __snake_case : int = Image.open(dataset[1]['''file'''] ) return image, map def lowercase ( ) ->Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __snake_case : Any = Image.open(ds[0]['''file'''] ) __snake_case : List[str] = Image.open(ds[1]['''file'''] ) __snake_case : Optional[int] = Image.open(ds[2]['''file'''] ) __snake_case : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =BeitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = BeitImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_normalize''' ) ) self.assertTrue(hasattr(a_ , '''image_mean''' ) ) self.assertTrue(hasattr(a_ , '''image_std''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , a_ ) __snake_case : Optional[int] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a_ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Any = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Any = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) __snake_case : Dict = [] for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input __snake_case : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched __snake_case : str = image_processing(a_ , a_ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) __snake_case , __snake_case : List[Any] = prepare_semantic_single_inputs() __snake_case : List[str] = image_processing(a_ , a_ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) __snake_case , __snake_case : Any = prepare_semantic_batch_inputs() __snake_case : Dict = image_processing(a_ , a_ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = 
self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 __snake_case , __snake_case : Dict = prepare_semantic_single_inputs() __snake_case : str = image_processing(a_ , a_ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) __snake_case : int = True __snake_case : Any = image_processing(a_ , a_ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
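# Editor's note: a hedged sketch of why the assertions above allow the value
# 255. With label reduction enabled, the background class (0) is mapped to the
# ignore index 255 and every other class id shifts down by one:
import numpy as np

labels = np.array([0, 1, 150])
reduced = labels - 1
reduced[labels == 0] = 255
print(reduced.tolist())  # [255, 0, 149]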
24
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
1
"""simple docstring""" from math import factorial def lowercase ( _snake_case : int , _snake_case : int ) ->int: """simple docstring""" if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(_snake_case ) // (factorial(_snake_case ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", F'fifty-two card deck is: {combinations(52, 5)}\n', ) print( """If a class of 40 students must be arranged into groups of""", F'4 for group projects, there are {combinations(40, 4)} ways', """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", F'are {combinations(10, 3)} ways that first, second and', """third place can be awarded.""", )
24
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right SCREAMING_SNAKE_CASE : str = 25_6047 SCREAMING_SNAKE_CASE : Union[str, Any] = 25_6145 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =NllbTokenizer lowerCamelCase__ =NllbTokenizerFast lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ ={} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __snake_case : Dict = NllbTokenizer(a_ , keep_accents=a_ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = NllbTokenizer(a_ , keep_accents=a_ ) __snake_case : Any = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __snake_case : List[str] = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __snake_case : str = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : Any = self.rust_tokenizer_class.from_pretrained(a_ , **a_ ) __snake_case : str = self.tokenizer_class.from_pretrained(a_ , **a_ ) __snake_case : List[Any] = tempfile.mkdtemp() __snake_case : List[Any] = tokenizer_r.save_pretrained(a_ ) __snake_case : Any = tokenizer_p.save_pretrained(a_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __snake_case : Any = 
tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(a_ , a_ ) # Checks everything loads correctly in the same way __snake_case : int = tokenizer_r.from_pretrained(a_ ) __snake_case : Dict = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) shutil.rmtree(a_ ) # Save tokenizer rust, legacy_format=True __snake_case : str = tempfile.mkdtemp() __snake_case : Any = tokenizer_r.save_pretrained(a_ , legacy_format=a_ ) __snake_case : Dict = tokenizer_p.save_pretrained(a_ ) # Checks it saves with the same files self.assertSequenceEqual(a_ , a_ ) # Checks everything loads correctly in the same way __snake_case : List[Any] = tokenizer_r.from_pretrained(a_ ) __snake_case : Any = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) shutil.rmtree(a_ ) # Save tokenizer rust, legacy_format=False __snake_case : Dict = tempfile.mkdtemp() __snake_case : Optional[Any] = tokenizer_r.save_pretrained(a_ , legacy_format=a_ ) __snake_case : str = tokenizer_p.save_pretrained(a_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __snake_case : Union[str, Any] = tokenizer_r.from_pretrained(a_ ) __snake_case : Union[str, Any] = tokenizer_p.from_pretrained(a_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a_ , a_ ) ) shutil.rmtree(a_ ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if not self.test_seqaseq: return __snake_case : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation.
__snake_case : List[str] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for''' ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons''' ''' will only worsen the violence and misery for millions of people.''', ] __snake_case : Optional[Any] = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al''' ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi''' ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] try: __snake_case : Optional[int] = tokenizer.prepare_seqaseq_batch( src_texts=a_ , tgt_texts=a_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified __snake_case : Tuple = tokenizer.prepare_seqaseq_batch( a_ , tgt_texts=a_ , max_length=3 , return_tensors='''pt''' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) __snake_case : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=a_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('''decoder_input_ids''' , a_ ) @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : str = [AddedToken('''<special>''' , lstrip=a_ )] __snake_case : str = self.rust_tokenizer_class.from_pretrained( a_ , additional_special_tokens=a_ , **a_ ) __snake_case : Dict = tokenizer_r.encode('''Hey this is a <special> token''' ) __snake_case : List[Any] = tokenizer_r.encode('''<special>''' , add_special_tokens=a_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: __snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained( a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained( a_ , additional_special_tokens=a_ , **a_ ) __snake_case : Optional[int] = tokenizer_p.encode('''Hey this is a <special> token''' ) __snake_case : List[Any] = tokenizer_cr.encode('''Hey this is a <special> token''' ) self.assertEqual(a_ , a_ ) self.assertEqual(a_ , a_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ ='facebook/nllb-200-distilled-600M' lowerCamelCase__ =[ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] 
lowerCamelCase__ =[ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowerCamelCase__ =[ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def SCREAMING_SNAKE_CASE (cls ): '''simple docstring''' __snake_case : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' ) __snake_case : Union[str, Any] = 1 return cls def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_60_57 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertIn(a_ , self.tokenizer.all_special_ids ) # fmt: off __snake_case : str = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on __snake_case : List[Any] = self.tokenizer.decode(a_ , skip_special_tokens=a_ ) __snake_case : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ ) self.assertEqual(a_ , a_ ) self.assertNotIn(self.tokenizer.eos_token , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , a_ ) __snake_case : Optional[int] = 10 __snake_case : Optional[int] = self.tokenizer(a_ , max_length=a_ , truncation=a_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , a_ ) self.assertEqual(len(a_ ) , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_62_03, 3] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(a_ ) __snake_case : Tuple = NllbTokenizer.from_pretrained(a_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a_ ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __snake_case : List[Any] = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] ) self.assertIsInstance(a_ , a_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) __snake_case : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , a_ ) self.assertEqual(a_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.tokenizer(self.src_text , padding=a_ , truncation=a_ , max_length=3 , return_tensors='''pt''' ) __snake_case : int = self.tokenizer( text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=10 , return_tensors='''pt''' ) __snake_case : Any = targets['''input_ids'''] __snake_case : List[str] = shift_tokens_right( a_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( nested_simplify(a_ ) , { # A, test, EOS, en_XX '''input_ids''': [[25_60_47, 70, 73_56, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_60_57, } , ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = True __snake_case : Optional[int] = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) __snake_case : Union[str, Any] = False __snake_case : str = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
24
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
1
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py SCREAMING_SNAKE_CASE : Any = """src/transformers""" SCREAMING_SNAKE_CASE : Dict = """docs/source/en""" SCREAMING_SNAKE_CASE : List[str] = """.""" def lowercase ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple ) ->Optional[Any]: """simple docstring""" with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case : List[Any] = f.readlines() # Find the start prompt. __snake_case : Dict = 0 while not lines[start_index].startswith(_snake_case ): start_index += 1 start_index += 1 __snake_case : Optional[int] = start_index while not lines[end_index].startswith(_snake_case ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | SCREAMING_SNAKE_CASE : Optional[int] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH) def lowercase ( _snake_case : Any ) ->Optional[int]: """simple docstring""" __snake_case : str = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _snake_case ) return [m.group(0 ) for m in matches] def lowercase ( _snake_case : Dict , _snake_case : List[str] ) ->List[Any]: """simple docstring""" __snake_case : Optional[int] = 2 if text == '''✅''' or text == '''❌''' else len(_snake_case ) __snake_case : str = (width - text_length) // 2 __snake_case : List[str] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __snake_case : Optional[int] = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __snake_case : Union[str, Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __snake_case : List[Any] = collections.defaultdict(_snake_case ) __snake_case : List[Any] = collections.defaultdict(_snake_case ) __snake_case : Optional[Any] = collections.defaultdict(_snake_case ) __snake_case : Optional[int] = collections.defaultdict(_snake_case ) __snake_case : Any = collections.defaultdict(_snake_case ) # Let's lookup through all transformers object (once). 
for attr_name in dir(_snake_case ): __snake_case : List[str] = None if attr_name.endswith('''Tokenizer''' ): __snake_case : Tuple = slow_tokenizers __snake_case : Union[str, Any] = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __snake_case : List[Any] = fast_tokenizers __snake_case : Optional[int] = attr_name[:-13] elif _re_tf_models.match(_snake_case ) is not None: __snake_case : List[Any] = tf_models __snake_case : int = _re_tf_models.match(_snake_case ).groups()[0] elif _re_flax_models.match(_snake_case ) is not None: __snake_case : Tuple = flax_models __snake_case : Optional[int] = _re_flax_models.match(_snake_case ).groups()[0] elif _re_pt_models.match(_snake_case ) is not None: __snake_case : Optional[Any] = pt_models __snake_case : List[Any] = _re_pt_models.match(_snake_case ).groups()[0] if lookup_dict is not None: while len(_snake_case ) > 0: if attr_name in model_name_to_prefix.values(): __snake_case : Optional[int] = True break # Try again after removing the last word in the name __snake_case : Dict = ''''''.join(camel_case_split(_snake_case )[:-1] ) # Let's build that table! __snake_case : int = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __snake_case : List[Any] = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __snake_case : int = [len(_snake_case ) + 2 for c in columns] __snake_case : List[Any] = max([len(_snake_case ) for name in model_names] ) + 2 # Build the table per se __snake_case : Optional[int] = '''|''' + '''|'''.join([_center_text(_snake_case , _snake_case ) for c, w in zip(_snake_case , _snake_case )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __snake_case : Optional[int] = {True: '''✅''', False: '''❌'''} for name in model_names: __snake_case : Optional[Any] = model_name_to_prefix[name] __snake_case : Optional[Any] = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(_snake_case , _snake_case ) for l, w in zip(_snake_case , _snake_case )] ) + "|\n" return table def lowercase ( _snake_case : List[str]=False ) ->Dict: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : int = _find_text_in_file( filename=os.path.join(_snake_case , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) __snake_case : Optional[Any] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(_snake_case , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() check_model_table(args.fix_and_overwrite)
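# --- illustrative sketch (added for clarity; not part of the original sample) ---
# Runnable, de-obfuscated versions of the two small helpers above: split a
# CamelCase identifier into words, and center a table cell to a fixed width.
import re as _re_sketch


def _camel_case_split_sketch(identifier):
    pattern = r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"
    return [m.group(0) for m in _re_sketch.finditer(pattern, identifier)]


def _center_text_sketch(text, width):
    left = (width - len(text)) // 2
    return " " * left + text + " " * (width - len(text) - left)


assert _camel_case_split_sketch("CamelCaseXYZModel") == ["Camel", "Case", "XYZ", "Model"]
assert _center_text_sketch("ok", 6) == "  ok  "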
24
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
1
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def lowercase ( _snake_case : int = 3 ) ->qiskit.result.counts.Counts: """simple docstring""" if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) __snake_case : List[str] = QuantumRegister(_snake_case , '''qr''' ) __snake_case : Tuple = ClassicalRegister(_snake_case , '''cr''' ) __snake_case : int = QuantumCircuit(_snake_case , _snake_case ) __snake_case : Tuple = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots __snake_case : Optional[int] = Aer.get_backend('''qasm_simulator''' ) __snake_case : Any = execute(_snake_case , _snake_case , shots=10_000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( _snake_case : str , _snake_case : str , _snake_case : List[str] , _snake_case : int ) ->Tuple: """simple docstring""" __snake_case : str = sorted(zip(_snake_case , _snake_case ) , key=lambda _snake_case : x[0] / x[1] , reverse=_snake_case ) __snake_case , __snake_case : Optional[int] = [i[0] for i in r], [i[1] for i in r] __snake_case : str = list(accumulate(_snake_case ) ) __snake_case : Dict = bisect(_snake_case , _snake_case ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
1
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
24
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
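# --- illustrative usage sketch (added for clarity; not part of the original sample) ---
# Assuming the class above restores to transformers' MBartTokenizerFast, the
# src_lang/tgt_lang machinery is exercised like this (needs network access to
# download "facebook/mbart-large-en-ro" from the Hub):
from transformers import MBartTokenizerFast

_tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
_batch = _tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the sequence is suffixed (not
# prefixed) with [eos_token_id, <en_XX language-code id>].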
24
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Union[str, Any] = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class _UpperCAmelCase ( __snake_case, __snake_case ): '''simple docstring''' lowerCamelCase__ ='nat' lowerCamelCase__ ={ 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=4 , a_=3 , a_=64 , a_=[3, 4, 6, 5] , a_=[2, 4, 8, 16] , a_=7 , a_=3.0 , a_=True , a_=0.0 , a_=0.0 , a_=0.1 , a_="gelu" , a_=0.02 , a_=1E-5 , a_=0.0 , a_=None , a_=None , **a_ , ): '''simple docstring''' super().__init__(**a_ ) __snake_case : List[str] = patch_size __snake_case : str = num_channels __snake_case : Tuple = embed_dim __snake_case : Union[str, Any] = depths __snake_case : Optional[Any] = len(a_ ) __snake_case : Tuple = num_heads __snake_case : str = kernel_size __snake_case : Optional[int] = mlp_ratio __snake_case : int = qkv_bias __snake_case : Any = hidden_dropout_prob __snake_case : List[str] = attention_probs_dropout_prob __snake_case : Dict = drop_path_rate __snake_case : List[str] = hidden_act __snake_case : Union[str, Any] = layer_norm_eps __snake_case : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case : Optional[int] = int(embed_dim * 2 ** (len(a_ ) - 1) ) __snake_case : Any = layer_scale_init_value __snake_case : Tuple = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(a_ ) + 1 )] __snake_case , __snake_case : int = get_aligned_output_features_output_indices( out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
24
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def lowercase ( _snake_case : Dict=None ) ->int: """simple docstring""" __snake_case : str = argparse.ArgumentParser(add_help=_snake_case , allow_abbrev=_snake_case ) # The main config parser __snake_case : Tuple = config_command_parser(_snake_case ) # The subparser to add commands to __snake_case : List[str] = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' ) # Then add other parsers with the parent parser default_command_parser(_snake_case , parents=[parent_parser] ) update_command_parser(_snake_case , parents=[parent_parser] ) return config_parser def lowercase ( ) ->Tuple: """simple docstring""" __snake_case : Dict = get_config_parser() __snake_case : Any = config_parser.parse_args() if not hasattr(_snake_case , '''func''' ): config_parser.print_help() exit(1 ) # Run args.func(_snake_case ) if __name__ == "__main__": main()
24
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : Tuple = parent __snake_case : str = batch_size __snake_case : Tuple = seq_length __snake_case : List[str] = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : List[Any] = use_labels __snake_case : Tuple = vocab_size __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Any = intermediate_size __snake_case : str = hidden_act __snake_case : List[str] = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : Union[str, Any] = max_position_embeddings __snake_case : int = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Optional[int] = num_choices __snake_case : str = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : str = None __snake_case : str = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = LlamaModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : str = model(a_ , attention_mask=a_ ) __snake_case 
: Tuple = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : int = True __snake_case : Any = LlamaModel(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , ) __snake_case : Tuple = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , ) __snake_case : int = model(a_ , attention_mask=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LlamaForCausalLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : str = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = True __snake_case : int = True __snake_case : Optional[Any] = LlamaForCausalLM(config=a_ ) model.to(a_ ) model.eval() # first forward pass __snake_case : List[Any] = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , ) __snake_case : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __snake_case : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : int = torch.cat([input_mask, next_mask] , dim=-1 ) __snake_case : int = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )['''hidden_states'''][0] __snake_case : List[Any] = model( a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )['''hidden_states'''][0] # select random slice __snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Any = config_and_inputs __snake_case : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowerCamelCase__ =(LlamaForCausalLM,) if is_torch_available() else () lowerCamelCase__ =( { 'feature-extraction': LlamaModel, 'text-classification': LlamaForSequenceClassification, 'text-generation': 
LlamaForCausalLM, 'zero-shot': LlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LlamaModelTester(self ) __snake_case : Union[str, Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : List[str] = type self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = 3 __snake_case : Any = input_dict['''input_ids'''] __snake_case : List[Any] = input_ids.ne(1 ).to(a_ ) __snake_case : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : List[Any] = LlamaForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[Any] = 3 __snake_case : Tuple = '''single_label_classification''' __snake_case : Optional[int] = input_dict['''input_ids'''] __snake_case : int = input_ids.ne(1 ).to(a_ ) __snake_case : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : Tuple = LlamaForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = 3 __snake_case : int = '''multi_label_classification''' __snake_case : Union[str, Any] = input_dict['''input_ids'''] __snake_case : Any = input_ids.ne(1 ).to(a_ ) __snake_case : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case : List[Any] = LlamaForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Any = ids_tensor([1, 10] , config.vocab_size ) __snake_case : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at 
init time so the two models get the same random weights __snake_case : Any = LlamaModel(a_ ) original_model.to(a_ ) original_model.eval() __snake_case : List[Any] = original_model(a_ ).last_hidden_state __snake_case : str = original_model(a_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __snake_case : str = {'''type''': scaling_type, '''factor''': 10.0} __snake_case : Optional[int] = LlamaModel(a_ ) scaled_model.to(a_ ) scaled_model.eval() __snake_case : Union[str, Any] = scaled_model(a_ ).last_hidden_state __snake_case : List[Any] = scaled_model(a_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a_ , a_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) __snake_case : List[str] = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __snake_case : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , a_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : List[str] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : Tuple = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) __snake_case : Union[str, Any] = model(torch.tensor(a_ ) ) # Expected mean on dim = -1 __snake_case : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , a_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : Tuple = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : int = 
LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) __snake_case : Dict = model(torch.tensor(a_ ) ) # Expected mean on dim = -1 __snake_case : List[str] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , a_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : Dict = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , a_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : List[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) __snake_case : List[Any] = model(torch.tensor(a_ ) ) __snake_case : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , a_ , atol=1E-2 , rtol=1E-2 ) # fmt: off __snake_case : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' __snake_case : Dict = '''Simply put, the theory of relativity states that ''' __snake_case : Optional[int] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) __snake_case : Tuple = tokenizer.encode(a_ , return_tensors='''pt''' ) __snake_case : Optional[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=a_ ) # greedy generation outputs __snake_case : Union[str, Any] = model.generate(a_ , max_new_tokens=64 , top_p=a_ , temperature=1 , do_sample=a_ ) __snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=a_ ) self.assertEqual(a_ , a_ )
24
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
24
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Tuple = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : Optional[int] = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Dict = { """camembert-base""": 512, } SCREAMING_SNAKE_CASE : Tuple = """▁""" class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =CamembertTokenizer def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=["<s>NOTUSED", "</s>NOTUSED"] , **a_ , ): '''simple docstring''' __snake_case : Any = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : List[str] = vocab_file __snake_case : List[str] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] __snake_case : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : Any = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
24
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=__snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =field(default='automatic-speech-recognition', metadata={'include_in_asdict_even_if_is_default': True} ) lowerCamelCase__ =Features({'audio': Audio()} ) lowerCamelCase__ =Features({'transcription': Value('string' )} ) lowerCamelCase__ ="audio" lowerCamelCase__ ="transcription" def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , a_ ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) __snake_case : Optional[int] = copy.deepcopy(self ) __snake_case : str = self.input_schema.copy() __snake_case : List[str] = features[self.audio_column] __snake_case : Tuple = input_schema return task_template @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
1
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = logging.get_logger() # the current default level is logging.WARNING __snake_case : Any = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = logging.get_verbosity() __snake_case : Optional[int] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case : str = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(a_ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var __snake_case : List[str] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case : Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , a_ ) __snake_case : Union[str, Any] = logging.log_levels[env_level_str] __snake_case : Tuple = logging.get_verbosity() self.assertEqual( a_ , a_ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __snake_case : Dict = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() __snake_case : Union[str, Any] = logging.logging.getLogger() with CaptureLogger(a_ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() __snake_case : Tuple = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case : Optional[Any] = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing 
should be logged as env var disables this method with CaptureLogger(a_ ) as cl: logger.warning_advice(a_ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a_ ) as cl: logger.warning_advice(a_ ) self.assertEqual(cl.out , msg + '''\n''' ) def lowercase ( ) ->Any: """simple docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
24
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
24
1
"""simple docstring""" from __future__ import annotations def lowercase ( _snake_case : list[int] , _snake_case : list[int] , _snake_case : int ) ->tuple[float, list[float]]: """simple docstring""" __snake_case : int = list(range(len(_snake_case ) ) ) __snake_case : Dict = [v / w for v, w in zip(_snake_case , _snake_case )] index.sort(key=lambda _snake_case : ratio[i] , reverse=_snake_case ) __snake_case : float = 0 __snake_case : list[float] = [0] * len(_snake_case ) for i in index: if weight[i] <= capacity: __snake_case : Optional[Any] = 1 max_value += value[i] capacity -= weight[i] else: __snake_case : Union[str, Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
1
"""simple docstring""" import csv import tweepy # Twitter API credentials SCREAMING_SNAKE_CASE : Optional[Any] = '''''' SCREAMING_SNAKE_CASE : Optional[int] = '''''' SCREAMING_SNAKE_CASE : List[str] = '''''' SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' def lowercase ( _snake_case : str ) ->None: """simple docstring""" __snake_case : Dict = tweepy.OAuthHandler(_UpperCAmelCase , _UpperCAmelCase ) auth.set_access_token(_UpperCAmelCase , _UpperCAmelCase ) __snake_case : List[Any] = tweepy.API(_UpperCAmelCase ) # initialize a list to hold all the tweepy Tweets __snake_case : Optional[int] = [] # make initial request for most recent tweets (200 is the maximum allowed count) __snake_case : Tuple = api.user_timeline(screen_name=_UpperCAmelCase , count=200 ) # save most recent tweets alltweets.extend(_UpperCAmelCase ) # save the id of the oldest tweet less one __snake_case : List[Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(_UpperCAmelCase ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates __snake_case : str = api.user_timeline( screen_name=_UpperCAmelCase , count=200 , max_id=_UpperCAmelCase ) # save most recent tweets alltweets.extend(_UpperCAmelCase ) # update the id of the oldest tweet less one __snake_case : Union[str, Any] = alltweets[-1].id - 1 print(f"""...{len(_UpperCAmelCase )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv __snake_case : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: __snake_case : int = csv.writer(_UpperCAmelCase ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(_UpperCAmelCase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
350
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def lowercase ( _snake_case : Any , _snake_case : bool = True , _snake_case : float = math.inf , _snake_case : float = -math.inf , _snake_case : float = math.inf , _snake_case : float = -math.inf , _snake_case : bool = False , _snake_case : float = 100 , _snake_case : float = 0.01 , _snake_case : float = 1 , ) ->Any: """simple docstring""" __snake_case : Any = False __snake_case : Optional[Any] = search_prob __snake_case : str = start_temperate __snake_case : Tuple = [] __snake_case : Tuple = 0 __snake_case : List[str] = None while not search_end: __snake_case : Tuple = current_state.score() if best_state is None or current_score > best_state.score(): __snake_case : Optional[Any] = current_state scores.append(lowercase__ ) iterations += 1 __snake_case : Union[str, Any] = None __snake_case : List[str] = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __snake_case : List[Any] = random.randint(0 , len(lowercase__ ) - 1 ) # picking a random neighbor __snake_case : Optional[int] = neighbors.pop(lowercase__ ) __snake_case : Any = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __snake_case : Optional[Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution __snake_case : Optional[int] = picked_neighbor else: __snake_case : List[Any] = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __snake_case : Union[str, Any] = picked_neighbor __snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __snake_case : Any = True else: __snake_case : str = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(lowercase__ ) , lowercase__ ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def lowercase ( _snake_case : Tuple , _snake_case : Union[str, Any] ) ->List[str]: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) SCREAMING_SNAKE_CASE : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : Optional[int] = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F'and 50 > y > - 5 found via hill climbing: {local_min.score()}' ) # starting the problem with initial coordinates (12, 47) SCREAMING_SNAKE_CASE : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : List[Any] = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F'and 50 > y > - 5 found via hill climbing: {local_min.score()}' ) def lowercase ( _snake_case : List[str] , _snake_case : str ) ->List[str]: """simple docstring""" return (3 * x**2) - (6 * y) SCREAMING_SNAKE_CASE : List[Any] = SearchProblem(x=3, y=4, step_size=1, 
function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : List[str] = simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F'{local_min.score()}' ) SCREAMING_SNAKE_CASE : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : List[Any] = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F'{local_min.score()}' )
351
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger SCREAMING_SNAKE_CASE : Dict = """<<<<<<< This should probably be modified because it mentions: """ SCREAMING_SNAKE_CASE : Optional[Any] = """=======\n>>>>>>>\n""" SCREAMING_SNAKE_CASE : Tuple = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] SCREAMING_SNAKE_CASE : List[Any] = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def lowercase ( _snake_case : List[Any] ) ->Tuple: """simple docstring""" return ConvertCommand(args.tfds_path , args.datasets_directory ) class _UpperCAmelCase ( _UpperCAmelCase ): '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE (a_ ): '''simple docstring''' __snake_case : int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__(self , a_ , a_ , *a_ ): '''simple docstring''' __snake_case : List[str] = get_logger('''datasets-cli/converting''' ) __snake_case : Optional[Any] = tfds_path __snake_case : Dict = datasets_directory def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if os.path.isdir(self._tfds_path ): __snake_case : Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __snake_case : Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. 
Please check path.''' ) __snake_case : int = os.path.abspath(self._datasets_directory ) self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) __snake_case : Tuple = [] __snake_case : Dict = [] __snake_case : Any = {} if os.path.isdir(self._tfds_path ): __snake_case : Dict = os.listdir(_UpperCAmelCase ) else: __snake_case : Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f"""Looking at file {f_name}""" ) __snake_case : Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) __snake_case : Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: __snake_case : Tuple = f.readlines() __snake_case : Optional[Any] = [] __snake_case : Dict = False __snake_case : List[str] = False __snake_case : List[Any] = [] for line in lines: __snake_case : List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __snake_case : Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here __snake_case : Dict = '''''' continue elif "from absl import logging" in out_line: __snake_case : Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: __snake_case : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __snake_case : Any = True __snake_case : str = list(filter(lambda a_ : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: __snake_case : List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __snake_case : Any = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) __snake_case : List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __snake_case : Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __snake_case : Dict = f_name.replace('''.py''' , '''''' ) __snake_case : Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) __snake_case : Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(f"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(f"""Converted in {output_file}""" ) for utils_file in utils_files: try: __snake_case : str = os.path.basename(_UpperCAmelCase ) __snake_case : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(f"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
352
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
0
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase ( A__ ): '''simple docstring''' lowerCamelCase__ : str =['image_processor', 'tokenizer'] lowerCamelCase__ : List[Any] ='OwlViTImageProcessor' lowerCamelCase__ : Optional[int] =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __snake_case , ) __snake_case : Optional[int] = kwargs.pop('''feature_extractor''' ) __snake_case : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__snake_case , __snake_case ) def __call__(self , a_=None , a_=None , a_=None , a_="max_length" , a_="np" , **a_ ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__snake_case , __snake_case ) or (isinstance(__snake_case , __snake_case ) and not isinstance(text[0] , __snake_case )): __snake_case : List[Any] = [self.tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case , **__snake_case )] elif isinstance(__snake_case , __snake_case ) and isinstance(text[0] , __snake_case ): __snake_case : Optional[Any] = [] # Maximum number of queries across batch __snake_case : Optional[Any] = max([len(__snake_case ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__snake_case ) != max_num_queries: __snake_case : int = t + [''' '''] * (max_num_queries - len(__snake_case )) __snake_case : Any = self.tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case , **__snake_case ) encodings.append(__snake_case ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": __snake_case : str = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __snake_case : Optional[int] = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __snake_case : Optional[Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __snake_case : Tuple = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __snake_case : Any = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) __snake_case : Tuple = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __snake_case : str = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) __snake_case : Optional[Any] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type 
could not be returned''' ) __snake_case : str = BatchEncoding() __snake_case : Optional[Any] = input_ids __snake_case : int = attention_mask if query_images is not None: __snake_case : Dict = BatchEncoding() __snake_case : Dict = self.image_processor( __snake_case , return_tensors=__snake_case , **__snake_case ).pixel_values __snake_case : List[Any] = query_pixel_values if images is not None: __snake_case : Union[str, Any] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is not None and images is not None: __snake_case : Any = image_features.pixel_values return encoding elif query_images is not None and images is not None: __snake_case : Any = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.image_processor.post_process(*__snake_case , **__snake_case ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__snake_case , **__snake_case ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__snake_case , **__snake_case ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*__snake_case , **__snake_case ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*__snake_case , **__snake_case ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , ) return self.image_processor
353
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
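A standalone sketch of the benchmark API these tests exercise (assumption: the boolean flag values shown are the usual ones for an inference-only run; the obfuscated `a_` placeholders above hide the actual values):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# Benchmark one tiny checkpoint for inference only, in-process.
args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
# Each result dict maps model name -> {"bs": [...], "ss": [...], "result": {...}},
# which is exactly the shape that check_results_dict_not_empty() walks above.
print(results.time_inference_result)
print(results.memory_inference_result)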
24
0
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def lowercase ( ) ->Optional[Any]: """simple docstring""" __snake_case : Optional[int] = 9 __snake_case : str = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __snake_case : List[Any] = kruskal(lowerCamelCase__ , lowerCamelCase__ ) __snake_case : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(lowerCamelCase__ ) == sorted(lowerCamelCase__ )
354
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
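A minimal usage sketch for the `FileLock` alias chosen at the bottom of this module (file names here are illustrative; a negative `timeout` blocks indefinitely, and nested acquisition is safe because of the internal lock counter):

lock = FileLock("high_ground.txt.lock", timeout=5)
with lock:
    with lock:  # re-entrant within one process: only the counter is incremented
        with open("high_ground.txt", "a") as f:
            f.write("appended under the lock\n")
# Counter is back to zero here, so _release() has run and the OS-level lock is gone.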
24
0
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml SCREAMING_SNAKE_CASE : Optional[int] = NewType("""DataClass""", Any) SCREAMING_SNAKE_CASE : Optional[int] = NewType("""DataClassType""", Any) def lowercase ( _snake_case : Any ) ->Optional[int]: """simple docstring""" if isinstance(__a , __a ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def lowercase ( _snake_case : list ) ->Optional[int]: """simple docstring""" __snake_case : List[Any] = {str(__a ): choice for choice in choices} return lambda _snake_case : str_to_choice.get(__a , __a ) def lowercase ( *, _snake_case : Union[str, List[str]] = None , _snake_case : str = None , _snake_case : Any = dataclasses.MISSING , _snake_case : Callable[[], Any] = dataclasses.MISSING , _snake_case : dict = None , **_snake_case : str , ) ->Optional[int]: """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __snake_case : str = {} if aliases is not None: __snake_case : Optional[Any] = aliases if help is not None: __snake_case : Union[str, Any] = help return dataclasses.field(metadata=__a , default=__a , default_factory=__a , **__a ) class _UpperCAmelCase ( lowercase__ ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , **a_ ): '''simple docstring''' if "formatter_class" not in kwargs: __snake_case : List[str] = ArgumentDefaultsHelpFormatter super().__init__(**_a ) if dataclasses.is_dataclass(_a ): __snake_case : Optional[int] = [dataclass_types] __snake_case : Any = list(_a ) for dtype in self.dataclass_types: self._add_dataclass_arguments(_a ) @staticmethod def SCREAMING_SNAKE_CASE (a_ , a_ ): '''simple docstring''' __snake_case : str = f"""--{field.name}""" __snake_case : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , _a ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) __snake_case : Union[str, Any] = kwargs.pop('''aliases''' , [] ) if isinstance(_a , _a ): __snake_case : Union[str, Any] = [aliases] __snake_case : str = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(_a , '''UnionType''' ) and isinstance(_a , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(_a ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(_a ) not in field.type.__args__: # filter `str` in Union __snake_case : List[str] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __snake_case : List[Any] = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __snake_case : Tuple = ( field.type.__args__[0] if isinstance(_a , field.type.__args__[1] ) else field.type.__args__[1] ) __snake_case : Dict = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __snake_case : Any = {} if origin_type is Literal or (isinstance(field.type , _a ) and issubclass(field.type , _a )): if origin_type is Literal: __snake_case : Optional[Any] = field.type.__args__ else: __snake_case : List[str] = [x.value for x in field.type] __snake_case : int = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: __snake_case : List[Any] = field.default else: __snake_case : Optional[int] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __snake_case : List[str] = copy(_a ) # Hack because type=bool in argparse does not behave as we want. __snake_case : List[Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __snake_case : List[Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __snake_case : int = default # This tells argparse we accept 0 or 1 value after --field_name __snake_case : Dict = '?' 
# This is the value that will get picked if we do --field_name (without value) __snake_case : str = True elif isclass(_a ) and issubclass(_a , _a ): __snake_case : Any = field.type.__args__[0] __snake_case : Optional[int] = '+' if field.default_factory is not dataclasses.MISSING: __snake_case : Union[str, Any] = field.default_factory() elif field.default is dataclasses.MISSING: __snake_case : Optional[Any] = True else: __snake_case : Union[str, Any] = field.type if field.default is not dataclasses.MISSING: __snake_case : Tuple = field.default elif field.default_factory is not dataclasses.MISSING: __snake_case : Union[str, Any] = field.default_factory() else: __snake_case : int = True parser.add_argument(_a , *_a , **_a ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __snake_case : Optional[Any] = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **_a ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if hasattr(_a , '''_argument_group_name''' ): __snake_case : Union[str, Any] = self.add_argument_group(dtype._argument_group_name ) else: __snake_case : Optional[Any] = self try: __snake_case : Dict[str, type] = get_type_hints(_a ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_a ): __snake_case : Union[str, Any] = '.'.join(map(_a , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(_a ): if not field.init: continue __snake_case : Any = type_hints[field.name] self._parse_dataclass_field(_a , _a ) def SCREAMING_SNAKE_CASE (self , a_=None , a_=False , a_=True , a_=None , a_=None , ): '''simple docstring''' if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __snake_case : Optional[Any] = [] if args_filename: args_files.append(Path(_a ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __snake_case : int = ArgumentParser() args_file_parser.add_argument(_a , type=_a , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) __snake_case : Dict = args_file_parser.parse_known_args(args=_a ) __snake_case : str = vars(_a ).get(args_file_flag.lstrip('''-''' ) , _a ) if cmd_args_file_paths: args_files.extend([Path(_a ) for p in cmd_args_file_paths] ) __snake_case : Optional[Any] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __snake_case : List[Any] = file_args + args if args is not None else file_args + sys.argv[1:] __snake_case : str = self.parse_known_args(args=_a ) __snake_case : str = [] for dtype in self.dataclass_types: __snake_case : int = {f.name for f in dataclasses.fields(_a ) if f.init} __snake_case : Optional[int] = {k: v for k, v in vars(_a ).items() if k in keys} for k in keys: delattr(_a , _a ) __snake_case : Dict = dtype(**_a ) outputs.append(_a ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(_a ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' __snake_case : Optional[int] = set(args.keys() ) __snake_case : Optional[int] = [] for dtype in self.dataclass_types: __snake_case : List[Any] = {f.name for f in dataclasses.fields(_a ) if f.init} __snake_case : Dict = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __snake_case : Optional[Any] = dtype(**_a ) outputs.append(_a ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_a )}""" ) return tuple(_a ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' with open(Path(_a ) , encoding='''utf-8''' ) as open_json_file: __snake_case : Dict = json.loads(open_json_file.read() ) __snake_case : Union[str, Any] = self.parse_dict(_a , allow_extra_keys=_a ) return tuple(_a ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' __snake_case : List[Any] = self.parse_dict(yaml.safe_load(Path(_a ).read_text() ) , allow_extra_keys=_a ) return tuple(_a )
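A usage sketch for the parser above. The ValueError text in the parsing method names `HfArgumentParser`, so that class name is grounded; the method name `parse_args_into_dataclasses` is the usual upstream name for the five-argument parsing method and, like the dataclass itself, is an assumption here:

from dataclasses import dataclass, field

@dataclass
class TrainingConfig:
    # Hypothetical example fields, used only to illustrate the parsing flow.
    model_name: str = field(metadata={"help": "Checkpoint to load."})
    learning_rate: float = 5e-5
    use_fp16: bool = False  # bool fields get the nargs="?" handling shown above

parser = HfArgumentParser(TrainingConfig)
(config,) = parser.parse_args_into_dataclasses(args=["--model_name", "bert-base-uncased"])
assert config.learning_rate == 5e-5 and config.use_fp16 is False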
355
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
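The integration check above, reduced to a standalone sketch (same checkpoint and input tensors as the slow test; only the shape is inspected here):

import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])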
24
0
"""simple docstring""" from __future__ import annotations def lowercase ( _snake_case : int , _snake_case : int ) ->list[list[int]]: """simple docstring""" __snake_case : List[Any] = [] create_all_state(1 , __snake_case , __snake_case , [] , __snake_case ) return result def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) ->None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(__snake_case , total_number - level + 2 ): current_list.append(__snake_case ) create_all_state(i + 1 , __snake_case , level - 1 , __snake_case , __snake_case ) current_list.pop() def lowercase ( _snake_case : list[list[int]] ) ->None: """simple docstring""" for i in total_list: print(*__snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = 4 SCREAMING_SNAKE_CASE : Union[str, Any] = 2 SCREAMING_SNAKE_CASE : Tuple = generate_all_combinations(n, k) print_all_state(total_list)
356
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
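The final integration test above, as a standalone sketch (same checkpoint, token ids, and all-ones mask; only the output shape is checked here):

import torch
from transformers import DistilBertModel

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # torch.Size([1, 11, 768])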
24
0
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __snake_case : str = len(_snake_case ) - 1 def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : Tuple = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , _snake_case ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_snake_case ) , 5 ) == 1 return output_values def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : List[str] = self.basis_function(_snake_case ) __snake_case : Optional[int] = 0.0 __snake_case : Optional[int] = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def SCREAMING_SNAKE_CASE (self , a_ = 0.01 ): '''simple docstring''' from matplotlib import pyplot as plt # type: ignore __snake_case : List[Any] = [] # x coordinates of points to plot __snake_case : List[str] = [] # y coordinates of points to plot __snake_case : Any = 0.0 while t <= 1: __snake_case : int = self.bezier_curve_function(_snake_case ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __snake_case : List[Any] = [i[0] for i in self.list_of_points] __snake_case : Tuple = [i[1] for i in self.list_of_points] plt.plot( _snake_case , _snake_case , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(_snake_case , _snake_case , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
357
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
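Invocation sketch for the converter above (the script filename and all paths are placeholders):

# python convert_token_dropping_checkpoint.py \
#     --tf_checkpoint_path /path/to/tf_ckpt \
#     --bert_config_file /path/to/bert_config.json \
#     --pytorch_dump_path /path/to/out_dir
# or, programmatically:
convert_checkpoint_to_pytorch(
    "/path/to/tf_ckpt",           # placeholder: TF Token Dropping checkpoint
    "/path/to/bert_config.json",  # placeholder: BERT architecture config
    "/path/to/out_dir",           # placeholder: output directory
)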
24
0
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) SCREAMING_SNAKE_CASE : Union[str, Any] = 'hf-internal-testing/tiny-random-bert' SCREAMING_SNAKE_CASE : Tuple = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") SCREAMING_SNAKE_CASE : Optional[int] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = cached_file(_UpperCamelCase , _UpperCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase ) ) ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: __snake_case : Union[str, Any] = f.read() self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) self.assertTrue(os.path.isfile(_UpperCamelCase ) ) # File is cached at the same place the second time. __snake_case : int = cached_file(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) # Using a specific revision to test the full commit hash. __snake_case : Union[str, Any] = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): __snake_case : Optional[int] = cached_file('''tiny-random-bert''' , _UpperCamelCase ) with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): __snake_case : Tuple = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): __snake_case : Dict = cached_file(_UpperCamelCase , '''conf''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): __snake_case : List[str] = cached_file(_UpperCamelCase , '''conf''' ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: __snake_case : str = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , '''.no_exist''' , _UpperCamelCase , '''conf''' ) ) ) __snake_case : Dict = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) __snake_case : List[Any] = cached_file(_UpperCamelCase , '''conf''' , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) __snake_case : Union[str, Any] = mock.Mock() __snake_case : List[Any] = 5_00 __snake_case : List[Any] = {} __snake_case : Union[str, Any] = HTTPError __snake_case : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head: __snake_case : Tuple = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCamelCase , revision='''ahaha''' ) __snake_case : Dict = get_file_from_repo('''bert-base-cased''' , _UpperCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. __snake_case : int = json.loads(open(_UpperCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[int] = Path(_UpperCamelCase ) / """a.txt""" filename.touch() self.assertEqual(get_file_from_repo(_UpperCamelCase , '''a.txt''' ) , str(_UpperCamelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCamelCase , '''b.txt''' ) )
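The happy path the first test exercises, in isolation (assumes the two positional arguments there are the repo id and `CONFIG_NAME`, which matches the assertions on `config.json` paths):

from transformers.utils import CONFIG_NAME, cached_file

# Downloads into the HF cache on the first call, resolves locally afterwards.
local_path = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
print(local_path)  # .../snapshots/<commit-sha>/config.json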
358
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
0
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py SCREAMING_SNAKE_CASE : Any = """src/diffusers""" # Matches is_xxx_available() SCREAMING_SNAKE_CASE : Dict = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla SCREAMING_SNAKE_CASE : List[str] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") SCREAMING_SNAKE_CASE : Tuple = """ {0} = None """ SCREAMING_SNAKE_CASE : Optional[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ SCREAMING_SNAKE_CASE : Optional[int] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def lowercase ( _snake_case : str ) ->Any: """simple docstring""" __snake_case : Optional[int] = _re_backend.findall(_snake_case ) if len(_snake_case ) == 0: return None return "_and_".join(_snake_case ) def lowercase ( ) ->Dict: """simple docstring""" with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case : Tuple = f.readlines() # Get to the point we do the actual imports for type checking __snake_case : int = 0 __snake_case : Any = {} # Go through the end of the file while line_index < len(_snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __snake_case : Dict = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 __snake_case : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(_snake_case ) and len(lines[line_index] ) > 1: __snake_case : Optional[int] = lines[line_index] __snake_case : Optional[int] = _re_single_line_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_snake_case ) > 0: __snake_case : Dict = objects else: line_index += 1 return backend_specific_objects def lowercase ( _snake_case : Any , _snake_case : int ) ->List[Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(_snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(_snake_case , _snake_case ) else: return DUMMY_CLASS.format(_snake_case , _snake_case ) def lowercase ( _snake_case : Union[str, Any]=None ) ->Optional[int]: """simple docstring""" if backend_specific_objects is None: __snake_case : Optional[int] = read_init() # For special correspondence backend to module name as used in the function requires_modulename __snake_case : Optional[Any] = {} for backend, objects in backend_specific_objects.items(): __snake_case : Tuple = '''[''' + ''', '''.join(f"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']''' __snake_case : Dict = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_snake_case , _snake_case ) for o in objects] ) __snake_case : Dict = dummy_file return dummy_files def lowercase ( _snake_case : Optional[Any]=False ) ->str: """simple docstring""" 
__snake_case : int = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __snake_case : Any = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. __snake_case : str = os.path.join(_snake_case , '''utils''' ) __snake_case : int = { backend: os.path.join(_snake_case , f"""dummy_{short_names.get(_snake_case , _snake_case )}_objects.py""" ) for backend in dummy_files.keys() } __snake_case : Tuple = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_snake_case ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case : List[Any] = f.read() else: __snake_case : Optional[Any] = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(_snake_case , _snake_case )}_objects.py as the main """ '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' f"""diffusers.utils.dummy_{short_names.get(_snake_case , _snake_case )}_objects.py. Run `make fix-copies` """ '''to fix this.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") SCREAMING_SNAKE_CASE : str = parser.parse_args() check_dummies(args.fix_and_overwrite)
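# For orientation, a small self-contained sketch of how the dummy-object class
# template above is meant to expand; the class name and backend list below are
# illustrative, not taken from the original file.
DUMMY_CLASS_TEMPLATE = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

print(DUMMY_CLASS_TEMPLATE.format("UNet2DModel", '["torch"]'))
# Renders a placeholder class whose entry points raise a helpful
# "install torch" error via requires_backends when the backend is missing.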
359
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = value __snake_case : Node | None = None __snake_case : Node | None = None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : int = tree def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__(self ): '''simple docstring''' yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
0
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np SCREAMING_SNAKE_CASE : Union[str, Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 SCREAMING_SNAKE_CASE : Optional[Any] = typing.Union[np.floataa, int, float] # noqa: UP007 def lowercase ( _snake_case : Vector , _snake_case : Vector ) ->VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(lowerCAmelCase__ ) - np.asarray(lowerCAmelCase__ )) ** 2 ) ) def lowercase ( _snake_case : Vector , _snake_case : Vector ) ->VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ** (1 / 2) if __name__ == "__main__": def lowercase ( ) ->None: """simple docstring""" from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) ) benchmark()
361
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : int = kwargs.pop('''feature_extractor''' ) __snake_case : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[Any] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer.model_input_names __snake_case : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a_ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a_ , ) return self.image_processor
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
0
"""simple docstring""" import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE : Any = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def lowercase ( _snake_case : str ) ->List[str]: """simple docstring""" config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def lowercase ( _snake_case : Union[str, Any] ) ->List[Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_lowerCamelCase ) def lowercase ( _snake_case : Dict ) ->List[str]: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main __snake_case : Dict = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase ) def lowercase ( _snake_case : Dict , _snake_case : Union[str, Any] ) ->Optional[int]: """simple docstring""" if exitstatus == 5: __snake_case : List[Any] = 0 # Doctest custom flag to ignore output. SCREAMING_SNAKE_CASE : Optional[int] = doctest.register_optionflag("""IGNORE_RESULT""") SCREAMING_SNAKE_CASE : Tuple = doctest.OutputChecker class _UpperCAmelCase ( __lowercase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , a_ , a_ , a_ ) SCREAMING_SNAKE_CASE : str = CustomOutputChecker SCREAMING_SNAKE_CASE : Optional[Any] = HfDoctestModule SCREAMING_SNAKE_CASE : Optional[int] = HfDocTestParser
363
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
0
"""simple docstring""" from math import ceil def lowercase ( _snake_case : List[str] , _snake_case : Optional[Any] ) ->str: """simple docstring""" __snake_case : List[Any] = list(range(0 , a_ ) ) __snake_case : Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check __snake_case : Optional[int] = [] for i in device_map_blocks: if device_map_blocks.count(a_ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(a_ ) # Missing blocks __snake_case : List[str] = [i for i in blocks if i not in device_map_blocks] __snake_case : Tuple = [i for i in device_map_blocks if i not in blocks] if len(a_ ) != 0: raise ValueError( '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.''' ''' These attention blocks were specified more than once: ''' + str(a_ ) ) if len(a_ ) != 0: raise ValueError( '''There are attention blocks for this model that are not specified in the device_map. Add these attention ''' '''blocks to a device on the device_map: ''' + str(a_ ) ) if len(a_ ) != 0: raise ValueError( '''The device_map contains more attention blocks than this model has. Remove these from the device_map:''' + str(a_ ) ) def lowercase ( _snake_case : Optional[Any] , _snake_case : List[Any] ) ->Optional[int]: """simple docstring""" __snake_case : Optional[int] = list(range(a_ ) ) __snake_case : Union[str, Any] = int(ceil(n_layers / len(a_ ) ) ) __snake_case : int = [layers[i : i + n_blocks] for i in range(0 , a_ , a_ )] return dict(zip(a_ , a_ ) )
364
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
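# Hypothetical use of the fast MBart tokenizer above; the checkpoint id and
# sentence are illustrative. Per set_src_lang_special_tokens above, source
# text is suffixed with </s> followed by the src_lang code.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0][-2:]))  # ['</s>', 'en_XX']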
24
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE : List[str] = 16 SCREAMING_SNAKE_CASE : Optional[int] = 32 def lowercase ( _snake_case : Accelerator , _snake_case : int = 16 ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __snake_case : Tuple = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) __snake_case : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __snake_case : List[str] = datasets.map( __a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __snake_case : Dict = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. __snake_case : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __snake_case : List[Any] = 16 elif accelerator.mixed_precision != "no": __snake_case : List[str] = 8 else: __snake_case : Union[str, Any] = None return tokenizer.pad( __a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__snake_case : Any = DataLoader( tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) __snake_case : Any = DataLoader( tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE : Optional[Any] = mocked_dataloaders # noqa: F811 def lowercase ( _snake_case : List[Any] , _snake_case : Optional[Any] ) ->Any: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1": __snake_case : Optional[Any] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __snake_case : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: __snake_case : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __snake_case : int = config['''lr'''] __snake_case : Optional[int] = int(config['''num_epochs'''] ) __snake_case : Optional[Any] = int(config['''seed'''] ) __snake_case : Dict = int(config['''batch_size'''] ) set_seed(__a ) __snake_case : Optional[int] = get_dataloaders(__a , __a ) __snake_case : List[str] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation __snake_case : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __snake_case : Tuple = batch_size // MAX_GPU_BATCH_SIZE __snake_case : Dict = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __snake_case : Any = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __snake_case : Dict = model.to(accelerator.device ) # Instantiate optimizer __snake_case : Tuple = AdamW(params=model.parameters() , lr=__a ) # Instantiate scheduler __snake_case : Dict = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __snake_case : Tuple = accelerator.prepare( __a , __a , __a , __a , __a ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: __snake_case : Optional[int] = os.path.split(__a )[-1].split('''.''' )[0] accelerator.init_trackers(__a , __a ) # Now we train the model for epoch in range(__a ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __snake_case : Tuple = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __snake_case : int = model(**__a ) __snake_case : Optional[int] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __snake_case : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(__a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __snake_case : Optional[int] = model(**__a ) __snake_case : Union[str, Any] = outputs.logits.argmax(dim=-1 ) __snake_case : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__a , references=__a , ) __snake_case : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __a ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(__a ), '''epoch''': epoch, } , step=__a , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=__a , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) __snake_case : Optional[int] = parser.parse_args() __snake_case : Union[str, Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__a , __a ) if __name__ == "__main__": main()
365
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
0
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =PriorTransformer lowerCamelCase__ ="hidden_states" @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = 4 __snake_case : Optional[int] = 8 __snake_case : Tuple = 7 __snake_case : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : Any = floats_tensor((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def SCREAMING_SNAKE_CASE (self , a_=0 ): '''simple docstring''' torch.manual_seed(__UpperCAmelCase ) __snake_case : Optional[int] = 4 __snake_case : List[Any] = 8 __snake_case : Dict = 7 __snake_case : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return (4, 8) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return (4, 8) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = { """num_attention_heads""": 2, """attention_head_dim""": 4, """num_layers""": 2, """embedding_dim""": 8, """num_embeddings""": 7, """additional_embeddings""": 4, } __snake_case : str = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = PriorTransformer.from_pretrained( '''hf-internal-testing/prior-dummy''' , output_loading_info=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(__UpperCAmelCase ) __snake_case : List[Any] = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.prepare_init_args_and_inputs_for_common() __snake_case : Any = self.model_class(**__UpperCAmelCase ) __snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : int = [*signature.parameters.keys()] __snake_case : Optional[int] = ["""hidden_states""", """timestep"""] self.assertListEqual(arg_names[:2] , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' ) __snake_case : Optional[Any] = model.to(__UpperCAmelCase ) if hasattr(__UpperCAmelCase , '''set_default_attn_processor''' ): model.set_default_attn_processor() __snake_case : List[str] = self.get_dummy_seed_input() with 
torch.no_grad(): __snake_case : Dict = model(**__UpperCAmelCase )[0] __snake_case : Union[str, Any] = output[0, :5].flatten().cpu() print(__UpperCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __snake_case : Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] ) self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) ) @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_=1 , a_=7_68 , a_=77 , a_=0 ): '''simple docstring''' torch.manual_seed(__UpperCAmelCase ) __snake_case : Any = batch_size __snake_case : str = embedding_dim __snake_case : str = num_embeddings __snake_case : List[str] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__UpperCAmelCase ) __snake_case : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__UpperCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[Any] = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' ) model.to(__UpperCAmelCase ) __snake_case : List[str] = self.get_dummy_seed_input(seed=__UpperCAmelCase ) with torch.no_grad(): __snake_case : List[str] = model(**__UpperCAmelCase )[0] assert list(sample.shape ) == [1, 7_68] __snake_case : str = sample[0, :8].flatten().cpu() print(__UpperCAmelCase ) __snake_case : Tuple = torch.tensor(__UpperCAmelCase ) assert torch_all_close(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
0
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """huggingface/time-series-transformer-tourism-monthly""": ( """https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json""" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='''time_series_transformer''' lowerCamelCase__ ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__(self , a_ = None , a_ = None , a_ = "student_t" , a_ = "nll" , a_ = 1 , a_ = [1, 2, 3, 4, 5, 6, 7] , a_ = "mean" , a_ = 0 , a_ = 0 , a_ = 0 , a_ = 0 , a_ = None , a_ = None , a_ = 32 , a_ = 32 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = 2 , a_ = True , a_ = "gelu" , a_ = 64 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1_00 , a_ = 0.02 , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = prediction_length __snake_case : Optional[Any] = context_length or prediction_length __snake_case : Optional[Any] = distribution_output __snake_case : Union[str, Any] = loss __snake_case : Dict = input_size __snake_case : int = num_time_features __snake_case : Any = lags_sequence __snake_case : Dict = scaling __snake_case : Tuple = num_dynamic_real_features __snake_case : Dict = num_static_real_features __snake_case : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) __snake_case : Optional[int] = cardinality else: __snake_case : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) __snake_case : List[Any] = embedding_dimension else: __snake_case : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __snake_case : str = num_parallel_samples # Transformer architecture configuration __snake_case : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features __snake_case : str = d_model __snake_case : Optional[Any] = encoder_attention_heads __snake_case : Dict = decoder_attention_heads __snake_case : List[Any] = encoder_ffn_dim __snake_case : str = decoder_ffn_dim __snake_case : Dict = encoder_layers __snake_case : str = decoder_layers __snake_case : Any = dropout __snake_case : str = attention_dropout __snake_case : List[Any] = activation_dropout __snake_case : Dict = encoder_layerdrop __snake_case : Any = decoder_layerdrop __snake_case : Optional[Any] = activation_function __snake_case : Tuple = init_std __snake_case : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
367
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE : Dict = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
368
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE : Any = ''' Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> repo = "openai/shap-e-img2img" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png" >>> image = load_image(image_url).convert("RGB") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], "corgi_3d.gif") ``` ''' @dataclass class _UpperCAmelCase ( a__ ): '''simple docstring''' lowerCamelCase__ =42 class _UpperCAmelCase ( a__ ): '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' super().__init__() self.register_modules( prior=_lowerCamelCase , image_encoder=_lowerCamelCase , image_processor=_lowerCamelCase , scheduler=_lowerCamelCase , renderer=_lowerCamelCase , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' if latents is None: __snake_case : Union[str, Any] = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) __snake_case : Any = latents.to(_lowerCamelCase ) __snake_case : Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE (self , a_=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __snake_case : Optional[int] = torch.device(f"""cuda:{gpu_id}""" ) __snake_case : Optional[int] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , ): '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , torch.Tensor ): __snake_case : str = torch.cat(_lowerCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCamelCase , axis=0 ) if not isinstance(_lowerCamelCase , torch.Tensor ): __snake_case : int = 
self.image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) __snake_case : str = image.to(dtype=self.image_encoder.dtype , device=_lowerCamelCase ) __snake_case : Optional[int] = self.image_encoder(_lowerCamelCase )['''last_hidden_state'''] __snake_case : str = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 __snake_case : List[Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: __snake_case : List[str] = torch.zeros_like(_lowerCamelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__(self , a_ , a_ = 1 , a_ = 25 , a_ = None , a_ = None , a_ = 4.0 , a_ = 64 , a_ = "pil" , a_ = True , ): '''simple docstring''' if isinstance(_lowerCamelCase , PIL.Image.Image ): __snake_case : List[Any] = 1 elif isinstance(_lowerCamelCase , torch.Tensor ): __snake_case : List[Any] = image.shape[0] elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): __snake_case : List[str] = len(_lowerCamelCase ) else: raise ValueError( f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCamelCase )}""" ) __snake_case : Tuple = self._execution_device __snake_case : Optional[Any] = batch_size * num_images_per_prompt __snake_case : Union[str, Any] = guidance_scale > 1.0 __snake_case : List[Any] = self._encode_image(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # prior self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) __snake_case : Any = self.scheduler.timesteps __snake_case : str = self.prior.config.num_embeddings __snake_case : List[str] = self.prior.config.embedding_dim __snake_case : Union[str, Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim __snake_case : List[Any] = latents.reshape(latents.shape[0] , _lowerCamelCase , _lowerCamelCase ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance __snake_case : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __snake_case : Dict = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) __snake_case : Tuple = self.prior( _lowerCamelCase , timestep=_lowerCamelCase , proj_embedding=_lowerCamelCase , ).predicted_image_embedding # remove the variance __snake_case : str = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: __snake_case : List[Any] = noise_pred.chunk(2 ) __snake_case : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) __snake_case : List[str] = self.scheduler.step( _lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_lowerCamelCase ) __snake_case : List[str] = [] for i, latent in enumerate(_lowerCamelCase ): 
print() __snake_case : Union[str, Any] = self.renderer.decode( latent[None, :] , _lowerCamelCase , size=_lowerCamelCase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , ) images.append(_lowerCamelCase ) __snake_case : Tuple = torch.stack(_lowerCamelCase ) if output_type not in ["np", "pil"]: raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" ) __snake_case : Any = images.cpu().numpy() if output_type == "pil": __snake_case : Dict = [self.numpy_to_pil(_lowerCamelCase ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_lowerCamelCase )
369
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class _UpperCAmelCase ( __UpperCamelCase ): '''simple docstring''' lowerCamelCase__ ="trocr" lowerCamelCase__ =["past_key_values"] lowerCamelCase__ ={ "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__(self , a_=5_02_65 , a_=10_24 , a_=12 , a_=16 , a_=40_96 , a_="gelu" , a_=5_12 , a_=0.1 , a_=0.0 , a_=0.0 , a_=2 , a_=0.02 , a_=0.0 , a_=True , a_=False , a_=True , a_=True , a_=1 , a_=0 , a_=2 , **a_ , ): '''simple docstring''' __snake_case : Dict = vocab_size __snake_case : Optional[Any] = d_model __snake_case : str = decoder_layers __snake_case : List[Any] = decoder_attention_heads __snake_case : Dict = decoder_ffn_dim __snake_case : Tuple = activation_function __snake_case : Dict = max_position_embeddings __snake_case : str = dropout __snake_case : str = attention_dropout __snake_case : Dict = activation_dropout __snake_case : Any = init_std __snake_case : Optional[Any] = decoder_layerdrop __snake_case : Optional[Any] = use_cache __snake_case : Optional[int] = scale_embedding __snake_case : Any = use_learned_position_embeddings __snake_case : Optional[int] = layernorm_embedding super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
370
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
24
0