Dataset schema (column, type, observed range):
  code                      string  (lengths 87 to 55.2k)
  code_codestyle            int64   (0 to 349)
  style_context             string  (lengths 135 to 49.1k)
  style_context_codestyle   int64   (0 to 349)
  label                     int64   (0 to 1)
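Each row below is rendered as its five cells in schema order. A minimal sketch of loading and inspecting rows with this schema, assuming the dump comes from a Hugging Face dataset repository (the repository id is a hypothetical placeholder):

# Hypothetical loading sketch: the repository id is a placeholder, not a real dataset.
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")  # placeholder id
row = ds[0]
print(len(row["code"]), row["code_codestyle"])   # snippet length and its style id (0 to 349)
print(len(row["style_context"]), row["label"])   # context length and the 0/1 label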
code:
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
    A_ : Optional[datasets.Features] = None


class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
    A_ : Optional[int] = PandasConfig

    def a (self : List[Any] ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )

    def a (self : Optional[int] , a__ : Union[str, Any] ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        __snake_case = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(a__ , (str, list, tuple) ):
            __snake_case = data_files
            if isinstance(a__ , a__ ):
                __snake_case = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __snake_case = [dl_manager.iter_files(a__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        __snake_case = []
        for split_name, files in data_files.items():
            if isinstance(a__ , a__ ):
                __snake_case = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __snake_case = [dl_manager.iter_files(a__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={'''files''': files} ) )
        return splits

    def a (self : Tuple , a__ : pa.Table ):
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __snake_case = table_cast(a__ , self.config.features.arrow_schema )
        return pa_table

    def a (self : Union[str, Any] , a__ : str ):
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(a__ ) ):
            with open(a__ , '''rb''' ) as f:
                __snake_case = pa.Table.from_pandas(pd.read_pickle(a__ ) )
                yield i, self._cast_table(a__ )
code_codestyle: 24
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> Any: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __snake_case = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> Any: assert _test_patching.open is open __snake_case = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , snake_case_ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> List[str]: # pandas.read_csv is not present in _test_patching __snake_case = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ): pass def lowerCamelCase__ ( ) -> Union[str, Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __snake_case = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , snake_case_ ) is None with patch_submodule(_test_patching , '''len''' , snake_case_ ): assert _test_patching.len is mock assert _test_patching.len is len def 
lowerCamelCase__ ( ) -> Union[str, Any]: __snake_case = '''__test_patch_submodule_start_and_stop_mock__''' __snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __snake_case = '''__test_patch_submodule_successive_join__''' __snake_case = '''__test_patch_submodule_successive_dirname__''' __snake_case = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> Tuple: __snake_case = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ): pass
style_context_codestyle: 24
label: 1
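The builder in this row's code cell streams pickled pandas DataFrames into Arrow tables; a standalone sketch of the final generator method's conversion step (the file name is illustrative):

# Standalone sketch of the pickle -> pandas -> Arrow step used by the builder above.
import pandas as pd
import pyarrow as pa

pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("example.pkl")  # illustrative file
with open("example.pkl", "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))
print(table.schema)  # a: int64, b: string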
code:
import argparse
import os
import re


snake_case_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
snake_case_ = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
snake_case_ = re.compile(R'\s*\(\s*"(\S[^"]+)"')


def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : bool = False ) -> Optional[int]:
    with open(snake_case_ , '''r''' , encoding='''utf-8''' ) as f:
        __snake_case = f.read()
    __snake_case = content.split('''\n''' )
    __snake_case = []
    __snake_case = 0
    while line_idx < len(snake_case_ ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            __snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            __snake_case = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    __snake_case = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            __snake_case = sorted(snake_case_ , key=lambda snake_case_ : _re_identifier.search(snake_case_ ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(snake_case_ ) )
    elif "\n".join(snake_case_ ) != content:
        return True


def lowerCamelCase__ ( snake_case_ : bool = False ) -> Union[str, Any]:
    __snake_case = [os.path.join(snake_case_ , snake_case_ ) for f in os.listdir(snake_case_ ) if f.endswith('''.py''' )]
    __snake_case = [sort_auto_mapping(snake_case_ , overwrite=snake_case_ ) for fname in fnames]
    if not overwrite and any(snake_case_ ):
        __snake_case = [f for f, d in zip(snake_case_ , snake_case_ ) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(snake_case_ )}. Run `make style` to fix"""
            ''' this.''' )


if __name__ == "__main__":
    snake_case_ = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    snake_case_ = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
code_codestyle: 24
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) A_ : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) A_ : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) ) def lowerCamelCase__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): __snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case = SeqaSeqDataset # Get datasets __snake_case = ( dataset_class( snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) __snake_case = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) __snake_case = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __snake_case = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case = train_result.metrics __snake_case = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate(metric_key_prefix='''val''' ) __snake_case = data_args.n_val __snake_case = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' ) __snake_case = test_output.metrics __snake_case = data_args.n_test if trainer.is_world_process_zero(): __snake_case = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: __snake_case = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) __snake_case = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
style_context_codestyle: 24
label: 1
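The script in this row's code cell orders each mapping entry by the first quoted identifier inside it; a minimal sketch of that sort key using the same regex:

# Minimal sketch of the identifier-based sort key used by the script above.
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
blocks.sort(key=lambda block: _re_identifier.search(block).groups()[0])
print(blocks)  # the "albert" entry now precedes "bert"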
code:
import re


def lowerCamelCase__ ( snake_case_ : str ) -> list:
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )]


def lowerCamelCase__ ( snake_case_ : str ) -> str:
    __snake_case = split_input(str_ )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def lowerCamelCase__ ( snake_case_ : str , snake_case_ : bool , snake_case_ : str ) -> str:
    try:
        __snake_case = split_input(snake_case_ )
        if upper:
            __snake_case = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            __snake_case = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def lowerCamelCase__ ( snake_case_ : str ) -> str:
    return to_simple_case(snake_case_ )


def lowerCamelCase__ ( snake_case_ : str ) -> str:
    try:
        __snake_case = to_simple_case(snake_case_ )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def lowerCamelCase__ ( snake_case_ : str , snake_case_ : bool ) -> str:
    return to_complex_case(snake_case_ , snake_case_ , '''_''' )


def lowerCamelCase__ ( snake_case_ : str , snake_case_ : bool ) -> str:
    return to_complex_case(snake_case_ , snake_case_ , '''-''' )


if __name__ == "__main__":
    __import__('doctest').testmod()
code_codestyle: 24
style_context:
from math import pi


def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
style_context_codestyle: 24
label: 1
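The style_context cell of this row reduces to the arc-length formula s = 2*pi*r*(theta/360); checking the __main__ call with angle 90 and radius 10:

# A 90-degree arc of a radius-10 circle is a quarter circumference: 5*pi.
from math import pi
print(2 * pi * 10 * (90 / 360))  # 15.707963267948966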
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
code_codestyle: 24
style_context:
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case_ = logging.get_logger(__name__)

snake_case_ = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    A_ : List[Any] = 'vit_msn'

    def __init__(self : Union[str, Any] , a__ : Optional[Any]=768 , a__ : Optional[Any]=12 , a__ : Optional[int]=12 , a__ : Optional[int]=3072 , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : int=0.0 , a__ : Optional[Any]=0.0_2 , a__ : List[Any]=1E-06 , a__ : Optional[int]=224 , a__ : str=16 , a__ : Optional[Any]=3 , a__ : int=True , **a__ : List[Any] , ):
        """simple docstring"""
        super().__init__(**a__ )
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = initializer_range
        __snake_case = layer_norm_eps
        __snake_case = image_size
        __snake_case = patch_size
        __snake_case = num_channels
        __snake_case = qkv_bias
style_context_codestyle: 24
label: 1
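The configuration in this row's style_context cell matches transformers' ViT-MSN config (model_type 'vit_msn'); a minimal sketch with the released class, mirroring the defaults shown above:

# Sketch with the released ViTMSNConfig; values mirror the defaults in the cell above.
from transformers import ViTMSNConfig

config = ViTMSNConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                      image_size=224, patch_size=16)
print(config.model_type)  # vit_msn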
code:
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def lowerCamelCase__ ( ) -> Dict:
    raise RuntimeError('''CUDA out of memory.''' )


class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    def __init__(self : List[Any] ):
        """simple docstring"""
        super().__init__()
        __snake_case = nn.Linear(3 , 4 )
        __snake_case = nn.BatchNormad(4 )
        __snake_case = nn.Linear(4 , 5 )

    def a (self : Optional[Any] , a__ : List[str] ):
        """simple docstring"""
        return self.lineara(self.batchnorm(self.lineara(a__ ) ) )


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def a (self : Any ):
        """simple docstring"""
        __snake_case = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(a__ : Any ):
            nonlocal batch_sizes
            batch_sizes.append(a__ )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(a__ , [128, 64, 32, 16, 8] )

    def a (self : Tuple ):
        """simple docstring"""
        __snake_case = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(a__ : str , a__ : Tuple ):
            nonlocal batch_sizes
            batch_sizes.append(a__ )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        __snake_case , __snake_case = mock_training_loop_function('''hello''' )
        self.assertListEqual(a__ , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )

    def a (self : List[Any] ):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(a__ : Optional[int] ):
            pass

        with self.assertRaises(a__ ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def a (self : Tuple ):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(a__ : str ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(a__ ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def a (self : int ):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(a__ : int , a__ : Tuple , a__ : int ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(a__ ) as cm:
            mock_training_loop_function(128 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )

    def a (self : Optional[Any] ):
        """simple docstring"""

        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(a__ : Optional[Any] ):
            raise ValueError('''Oops, we had an error!''' )

        with self.assertRaises(a__ ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )

    @require_cuda
    def a (self : Union[str, Any] ):
        """simple docstring"""
        __snake_case = torch.cuda.memory_allocated()
        __snake_case = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , a__ )
        __snake_case = release_memory(a__ )
        self.assertEqual(torch.cuda.memory_allocated() , a__ )
code_codestyle: 24
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def a (self : int , a__ : List[Any]=0 ): """simple docstring""" __snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) ) __snake_case = np.random.RandomState(a__ ) __snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) # warmup pass to apply optimizations __snake_case = pipe(**self.get_dummy_inputs() ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Any ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = 
np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : List[str] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = ort.SessionOptions() __snake_case = False return options def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) # using the PNDM scheduler by default __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , 
revision='''onnx''' ) __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
style_context_codestyle: 24
label: 1
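This row's code cell tests accelerate's find_executable_batch_size, which retries the wrapped function with a halved batch size whenever it sees an out-of-memory error; a minimal usage sketch:

# Minimal sketch: the decorator halves batch_size on each simulated CUDA OOM.
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:  # stand-in for a real out-of-memory condition
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 16, after failed attempts at 128, 64, and 32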
code:
import argparse

import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ) -> Union[str, Any]:
    __snake_case = OmegaConf.load(snake_case_ )
    __snake_case = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
    __snake_case = list(state_dict.keys() )
    # extract state_dict for VQVAE
    __snake_case = {}
    __snake_case = '''first_stage_model.'''
    for key in keys:
        if key.startswith(snake_case_ ):
            __snake_case = state_dict[key]
    # extract state_dict for UNetLDM
    __snake_case = {}
    __snake_case = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(snake_case_ ):
            __snake_case = state_dict[key]
    __snake_case = config.model.params.first_stage_config.params
    __snake_case = config.model.params.unet_config.params
    __snake_case = VQModel(**snake_case_ ).eval()
    vqvae.load_state_dict(snake_case_ )
    __snake_case = UNetLDMModel(**snake_case_ ).eval()
    unet.load_state_dict(snake_case_ )
    __snake_case = DDIMScheduler(
        timesteps=config.model.params.timesteps ,
        beta_schedule='''scaled_linear''' ,
        beta_start=config.model.params.linear_start ,
        beta_end=config.model.params.linear_end ,
        clip_sample=snake_case_ ,
    )
    __snake_case = LDMPipeline(snake_case_ , snake_case_ , snake_case_ )
    pipeline.save_pretrained(snake_case_ )


if __name__ == "__main__":
    snake_case_ = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    snake_case_ = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
code_codestyle: 24
style_context: (byte-for-byte identical to the HANS data-processing file reproduced in an earlier row; duplicate text elided)
style_context_codestyle: 24
label: 1
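The converter in this row's code cell splits one checkpoint state_dict into VQ-VAE and UNet parts by key prefix; a standalone sketch of that move (stripping the prefix from the kept keys is an assumption made for clarity, since the obfuscated cell drops the assignment targets):

# Standalone sketch of the prefix-based state_dict split performed above.
state_dict = {"first_stage_model.w": 1, "model.diffusion_model.w": 2, "cond_stage.w": 3}

def split_by_prefix(sd, prefix):
    return {k[len(prefix):]: v for k, v in sd.items() if k.startswith(prefix)}

vqvae_sd = split_by_prefix(state_dict, "first_stage_model.")      # {'w': 1}
unet_sd = split_by_prefix(state_dict, "model.diffusion_model.")   # {'w': 2}
print(vqvae_sd, unet_sd)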
code:
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


snake_case_ = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 24
style_context:
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    A_ : List[str] = ['image_processor', 'tokenizer']
    A_ : Optional[Any] = 'CLIPImageProcessor'
    A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ):
        """simple docstring"""
        __snake_case = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,
                a__ ,
            )
            __snake_case = kwargs.pop('''feature_extractor''' )
        __snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(a__ , a__ )

    def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            __snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ )
        if images is not None:
            __snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ )
        if text is not None and images is not None:
            __snake_case = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )

    def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*a__ , **a__ )

    def a (self : Any , *a__ : List[Any] , **a__ : List[str] ):
        """simple docstring"""
        return self.tokenizer.decode(*a__ , **a__ )

    @property
    def a (self : int ):
        """simple docstring"""
        __snake_case = self.tokenizer.model_input_names
        __snake_case = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
style_context_codestyle: 24
label: 1
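The processor in this row's style_context cell pairs a CLIPImageProcessor with an XLM-Roberta tokenizer, which is the layout of transformers' AltCLIP processor; a hedged usage sketch (the checkpoint id is an assumption):

# Hedged sketch of the combined text+image __call__; the checkpoint id is assumed.
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")  # assumed checkpoint
inputs = processor(text=["a photo of a cat"],
                   images=Image.new("RGB", (224, 224)),
                   return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values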
code:
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar


snake_case_ = TypeVar('T')


class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
    def __init__(self : List[Any] , a__ : list[T] , a__ : Callable[[T, T], T] ):
        """simple docstring"""
        __snake_case = None
        __snake_case = len(a__ )
        __snake_case = [any_type for _ in range(self.N )] + arr
        __snake_case = fnc
        self.build()

    def a (self : Dict ):
        """simple docstring"""
        for p in range(self.N - 1 , 0 , -1 ):
            __snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def a (self : Any , a__ : int , a__ : T ):
        """simple docstring"""
        p += self.N
        __snake_case = v
        while p > 1:
            __snake_case = p // 2
            __snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def a (self : int , a__ : int , a__ : int ):  # noqa: E741
        """simple docstring"""
        __snake_case , __snake_case = l + self.N, r + self.N
        __snake_case = None
        while l <= r:
            if l % 2 == 1:
                __snake_case = self.st[l] if res is None else self.fn(a__ , self.st[l] )
            if r % 2 == 0:
                __snake_case = self.st[r] if res is None else self.fn(a__ , self.st[r] )
            __snake_case , __snake_case = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    snake_case_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    snake_case_ = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    snake_case_ = SegmentTree(test_array, min)
    snake_case_ = SegmentTree(test_array, max)
    snake_case_ = SegmentTree(test_array, lambda a, b: a + b)

    def lowerCamelCase__ ( ) -> None:
        for i in range(len(snake_case_ ) ):
            for j in range(snake_case_ , len(snake_case_ ) ):
                __snake_case = reduce(snake_case_ , test_array[i : j + 1] )
                __snake_case = reduce(snake_case_ , test_array[i : j + 1] )
                __snake_case = reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
                assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
                assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )

    test_all_segments()

    for index, value in test_updates.items():
        snake_case_ = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    test_all_segments()
code_codestyle: 24
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Dict ): """simple docstring""" __snake_case = logging.get_logger() # the current default level is logging.WARNING __snake_case = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a__ ) def a (self : Dict ): """simple docstring""" __snake_case = logging.get_verbosity() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(a__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def a (self : Dict ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() # this action activates the env var __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ ) __snake_case = logging.log_levels[env_level_str] __snake_case = logging.get_verbosity() self.assertEqual( a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __snake_case = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def a (self : List[Any] ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.logging.getLogger() with CaptureLogger(a__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def a (self : Any ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log 
normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) def lowerCamelCase__ ( ) -> str: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
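# A minimal usage sketch of the verbosity API exercised by the tests above
# (all names exist in transformers.utils.logging; the snippet is illustrative,
# not part of the test suite):
from transformers.utils import logging

logging.set_verbosity_info()  # sets the level for every "transformers.*" logger
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.info("visible at INFO level")

logging.set_verbosity_error()  # now only errors get through
logger.warning("this warning is suppressed")

logging.disable_progress_bar()  # tqdm bars off...
logging.enable_progress_bar()   # ...and back on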
24
1
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowerCamelCase__ ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case_ ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def lowerCamelCase__ ( ) -> Union[str, Any]: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def lowerCamelCase__ ( ) -> Any: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case_ ): http_head('''https://huggingface.co''' )
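# The offline() helper used above lives in the datasets test utilities; a
# self-contained sketch of the same technique (the no_network helper below is
# hypothetical, not the datasets implementation):
from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def no_network():
    def deny(*args, **kwargs):
        raise requests.exceptions.ConnectionError("network disabled for this test")

    # requests.get / requests.request both funnel through Session.request
    with patch("requests.Session.request", side_effect=deny):
        yield


# with no_network():
#     requests.get("https://huggingface.co")  # raises ConnectionError immediately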
24
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[str] = CpmAntTokenizer A_ : Optional[int] = False def a (self : Optional[int] ): """simple docstring""" super().setUp() __snake_case = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def a (self : Dict ): """simple docstring""" __snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) __snake_case = '''今天天气真好!''' __snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = '''今天天气真好!''' __snake_case = [tokenizer.bos_token] + tokens __snake_case = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) __snake_case = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ )
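# The setUp above follows the standard tokenizer-test fixture: write a tiny
# vocabulary file, then build the tokenizer against it. The same pattern in
# isolation (temporary path and token list are illustrative):
import os
import tempfile

vocab_tokens = ["<unk>", "<pad>", "我", "是"]
tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write("".join(token + "\n" for token in vocab_tokens))
# CpmAntTokenizer(vocab_file) can then be instantiated without any download.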
24
1
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process snake_case_ = logging.getLogger(__name__) snake_case_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) snake_case_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCAmelCase )} , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) A_ : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) A_ : bool = field( default=_UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) def a (self : Tuple ): """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'The input training data file (a text file).'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) A_ : Optional[int] = field( default=5 , metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' } , ) A_ : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated. Default to the max input length of the model.' ) } , ) A_ : Optional[int] = field( default=_UpperCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) A_ : float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) def a (self : int ): """simple docstring""" if self.train_file is not None: __snake_case = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __snake_case = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : int ) -> Optional[Any]: with open(snake_case_ , '''r''' , encoding='''utf-8''' ) as f: __snake_case = [json.loads(snake_case_ ) for line in f.read().splitlines() if (len(snake_case_ ) > 0 and not line.isspace())] assert len(snake_case_ ) == len(snake_case_ ) __snake_case = {c: dataset[c] for c in dataset.column_names} __snake_case = refs return Dataset.from_dict(snake_case_ ) def lowerCamelCase__ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
__snake_case = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): __snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , ) __snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , ) else: __snake_case = {} if data_args.train_file is not None: __snake_case = data_args.train_file if data_args.validation_file is not None: __snake_case = data_args.validation_file __snake_case = data_args.train_file.split('''.''' )[-1] if extension == "txt": __snake_case = '''text''' __snake_case = load_dataset(snake_case_ , data_files=snake_case_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: __snake_case = AutoConfig.from_pretrained(model_args.config_name , **snake_case_ ) elif model_args.model_name_or_path: __snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: __snake_case = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) __snake_case = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: __snake_case = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **snake_case_ ) elif model_args.model_name_or_path: __snake_case = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: __snake_case = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __snake_case = AutoModelForMaskedLM.from_config(snake_case_ ) model.resize_token_embeddings(len(snake_case_ ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: __snake_case = datasets['''train'''].column_names else: __snake_case = datasets['''validation'''].column_names __snake_case = '''text''' if '''text''' in column_names else column_names[0] __snake_case = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(snake_case_ : Any ): # Remove empty lines __snake_case = [line for line in examples['''text'''] if len(snake_case_ ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=snake_case_ , truncation=snake_case_ , max_length=data_args.max_seq_length ) __snake_case = datasets.map( snake_case_ , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: __snake_case = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: __snake_case = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer __snake_case = data_args.train_ref_file or data_args.validation_ref_file if has_ref: __snake_case = False # Data collator # This one will take care of randomly masking the tokens. __snake_case = DataCollatorForWholeWordMask(tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __snake_case = Trainer( model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , ) # Training if training_args.do_train: if last_checkpoint is not None: __snake_case = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): __snake_case = model_args.model_name_or_path else: __snake_case = None __snake_case = trainer.train(resume_from_checkpoint=snake_case_ ) trainer.save_model() # Saves the tokenizer too for easy upload __snake_case = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(snake_case_ , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation __snake_case = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate() __snake_case = math.exp(eval_output['''eval_loss'''] ) __snake_case = perplexity __snake_case = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(snake_case_ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) return results def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
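# The data collator is the heart of this script: it masks whole words rather
# than isolated word pieces. A minimal sketch of it in isolation (the
# bert-base-uncased checkpoint is an assumption for illustration):
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

wwm_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
wwm_collator = DataCollatorForWholeWordMask(tokenizer=wwm_tokenizer, mlm_probability=0.15)

features = [{"input_ids": wwm_tokenizer("whole word masking example")["input_ids"]}]
batch = wwm_collator(features)
# batch["input_ids"] masks roughly 15% of *whole words*; batch["labels"] keeps
# the original ids at masked positions and -100 everywhere else.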
24
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class SCREAMING_SNAKE_CASE__ : def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ): """simple docstring""" __snake_case = parent __snake_case = out_indices if out_indices is not None else [4] __snake_case = stage_names __snake_case = out_features __snake_case = backbone __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = use_pretrained_backbone __snake_case = is_training def a (self : Union[str, Any] ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = self.get_config() return config, pixel_values def a (self : Any ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def a (self : List[Any] , a__ : int , a__ : int ): """simple docstring""" __snake_case = TimmBackbone(config=a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(a__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def a (self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else () A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : List[Any] = False A_ : Dict = False A_ : Any = False A_ : List[Any] = False def a (self : Tuple ): """simple docstring""" __snake_case = TimmBackboneModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ ) def a (self : Any ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : int ): """simple docstring""" __snake_case = '''resnet18''' __snake_case = '''microsoft/resnet-18''' __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ ) __snake_case = AutoBackbone.from_pretrained(a__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) 
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] ) __snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def a (self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : Optional[int] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a (self : Tuple ): """simple docstring""" pass def a (self : Tuple ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True __snake_case = 
self.has_attentions # no need to test all models as different heads yield the same functionality __snake_case = self.all_model_classes[0] __snake_case = model_class(a__ ) model.to(a__ ) __snake_case = self._prepare_for_class(a__ , a__ ) __snake_case = model(**a__ ) __snake_case = outputs[0][-1] # Encoder-/Decoder-only models __snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=a__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def a (self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __snake_case = copy.deepcopy(a__ ) __snake_case = None __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __snake_case = copy.deepcopy(a__ ) __snake_case = False __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ )
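# The equivalence being tested, reduced to a standalone sketch (checkpoint
# names are the ones the test itself uses; the first call requires timm):
from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
# For a (batch, 3, H, W) float tensor pixel_values:
#   backbone(pixel_values).feature_maps  ->  one tensor per requested stage,
# with channel counts matching backbone.channels for both variants.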
24
1
import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Block n (n >= 1) contributes 3 * 2**(n - 1) new terms, so this index
        # bounds how many blocks must be generated.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
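# Worked values for reference (Proth numbers have the form k * 2**n + 1 with
# odd k < 2**n):
assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]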
24
import os import pytest from transformers.dynamic_module_utils import get_imports snake_case_ = '\nimport os\n' snake_case_ = '\ndef foo():\n import os\n return False\n' snake_case_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n' snake_case_ = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case_ ) def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[int] ) -> Dict: __snake_case = os.path.join(snake_case_ , '''test_file.py''' ) with open(snake_case_ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case_ ) __snake_case = get_imports(snake_case_ ) assert parsed_imports == ["os"]
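# get_imports statically lists a module's required imports, skipping any import
# guarded by a try/except that catches ImportError -- exactly what the cases
# above pin down. A quick standalone check:
import tempfile

from transformers.dynamic_module_utils import get_imports

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n")
print(get_imports(f.name))  # ['os'] -- "bar" is excluded by the ImportError guard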
24
1
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # Collect every method marked with key codes into the dispatch table.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
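# Hypothetical usage of the dispatch machinery above (class, method and key are
# invented for illustration; real callers register KEYMAP codes):
# class Menu(metaclass=KeyHandler):
#     @mark(KEYMAP["q"])
#     def quit(cls):
#         return "bye"
#
# Menu.handle_input(Menu) reads one character and, if its code is registered in
# the key_handler table built by the metaclass, invokes the matching method.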
24
import socket


def main() -> None:
    """Receive a file over TCP from a server on the local host."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
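# A matching one-shot server sketch for the client above (the file name is an
# assumption; host and port must agree with the client):
import socket


def serve_file() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connected to {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open("send_this_file", "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()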
24
1
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from the center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read the original image
    img = imread("../image_data/lena.jpg")
    # turn the image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
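# The kernel above is the real-valued Gabor function. With rotated coordinates
#   x' =  x * cos(theta) + y * sin(theta)
#   y' = -x * sin(theta) + y * cos(theta)
# each entry is
#   g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2*pi*x'/lambd + psi)
# where sigma sets the Gaussian envelope width, lambd the carrier wavelength,
# gamma the aspect ratio and psi the phase offset.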
24
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:
    """Return one longest (non-strictly) increasing subsequence, recursively."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of the recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
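# Worked examples; note the subsequence is non-strictly increasing because the
# filters above use >=:
assert longest_subsequence([5, 1, 4, 2, 3]) == [1, 2, 3]
assert longest_subsequence([1, 2, 3]) == [1, 2, 3]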
24
1
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def a (self : int ): """simple docstring""" __snake_case = 1 __snake_case = 3 __snake_case = (32, 32) __snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ ) return image @property def a (self : Tuple ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def a (self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def a (self : Tuple ): """simple docstring""" torch.manual_seed(0 ) __snake_case = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , ) return RobertaSeriesModelWithTransformation(a__ ) @property def a (self : Any ): """simple docstring""" def extract(*a__ : Any , **a__ : List[str] ): class SCREAMING_SNAKE_CASE__ : def __init__(self : List[str] ): """simple docstring""" __snake_case = torch.ones([0] ) def a (self : Any , a__ : Any ): """simple docstring""" self.pixel_values.to(a__ ) return self return Out() return extract def a (self : str ): """simple docstring""" __snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=a__ ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __snake_case = 77 __snake_case = self.dummy_image.to(a__ ) __snake_case = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk __snake_case = AltDiffusionImgaImgPipeline( unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , ) __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a__ ) __snake_case = alt_pipe.to(a__ ) alt_pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A painting of a squirrel eating a burger''' __snake_case = torch.Generator(device=a__ ).manual_seed(0 ) __snake_case = alt_pipe( [prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=a__ , ) __snake_case = 
output.images __snake_case = torch.Generator(device=a__ ).manual_seed(0 ) __snake_case = alt_pipe( [prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=a__ , return_dict=a__ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a (self : int ): """simple docstring""" __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=a__ ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __snake_case = 77 __snake_case = self.dummy_image.to(a__ ) # put models in fp16 __snake_case = unet.half() __snake_case = vae.half() __snake_case = bert.half() # make sure here that pndm scheduler skips prk __snake_case = AltDiffusionImgaImgPipeline( unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , ) __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a__ ) __snake_case = alt_pipe.to(a__ ) alt_pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A painting of a squirrel eating a burger''' __snake_case = torch.manual_seed(0 ) __snake_case = alt_pipe( [prompt] , generator=a__ , num_inference_steps=2 , output_type='''np''' , image=a__ , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a (self : List[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 __snake_case = init_image.resize((760, 504) ) __snake_case = '''BAAI/AltDiffusion''' __snake_case = AltDiffusionImgaImgPipeline.from_pretrained( a__ , safety_checker=a__ , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , generator=a__ , output_type='''np''' , ) __snake_case = output.images[0] __snake_case = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) __snake_case = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) 
__snake_case = '''BAAI/AltDiffusion''' __snake_case = AltDiffusionImgaImgPipeline.from_pretrained( a__ , safety_checker=a__ , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , generator=a__ , output_type='''np''' , ) __snake_case = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
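# The pipeline under test, reduced to a minimal inference sketch (in current
# diffusers the public class is spelled AltDiffusionImg2ImgPipeline; prompt and
# parameters mirror the slow test above):
# from diffusers import AltDiffusionImg2ImgPipeline
#
# pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
# image = pipe(
#     prompt="A fantasy landscape, trending on artstation",
#     image=init_image,  # a PIL image, e.g. resized to 768x512
#     strength=0.75,
#     guidance_scale=7.5,
#     output_type="np",
# ).images[0]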
24
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = scope __snake_case = range_bbox def a (self : Optional[int] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case = bbox[i, j, 3] __snake_case = bbox[i, j, 1] __snake_case = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case = bbox[i, j, 2] __snake_case = bbox[i, j, 0] __snake_case = t __snake_case = None if self.use_input_mask: __snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def a (self : List[str] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a (self : List[Any] , a__ : 
List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ): """simple docstring""" __snake_case = LiltModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ): """simple docstring""" __snake_case = self.num_labels __snake_case = LiltForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ): """simple docstring""" __snake_case = LiltForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) A_ : Any = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) A_ : Optional[int] = False A_ : List[Any] = False def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" return True def a (self : Union[str, Any] ): """simple docstring""" __snake_case = LiltModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) @slow def a (self : Optional[int] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = LiltModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Tuple ): """simple docstring""" __snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ ) __snake_case = torch.tensor([[1, 2]] , device=a__ ) __snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ ) # forward pass with torch.no_grad(): __snake_case = model(input_ids=a__ , bbox=a__ ) __snake_case = torch.Size([1, 2, 768] ) __snake_case = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , ) self.assertTrue(outputs.last_hidden_state.shape , a__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
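# The integration test above boils down to this forward pass: LiLT consumes
# token ids plus one (x0, y0, x1, y1) box per token (values copied from the
# test itself):
# import torch
# from transformers import LiltModel
#
# model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
# input_ids = torch.tensor([[1, 2]])
# bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
# outputs = model(input_ids=input_ids, bbox=bbox)
# outputs.last_hidden_state.shape  # torch.Size([1, 2, 768])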
24
1
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(number: int) -> int:
    """Sum the factorial of each decimal digit of ``number``."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to their digit-factorial sum."""
    limit = 7 * factorial(9) + 1  # no larger number can equal its digit-factorial sum
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
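# Only two such "curious" numbers exist: 145 = 1! + 4! + 5! = 1 + 24 + 120 and
# 40585 = 4! + 0! + 5! + 8! + 5!, so solution() returns 145 + 40585 = 40730.
# The bound 7 * factorial(9) + 1 suffices: an 8-digit number's digit factorials
# sum to at most 8 * 9! = 2903040, which has only 7 digits.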
24
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def a (*a__ : List[str] , **a__ : List[str] ): """simple docstring""" pass def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. snake_case_ = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ ) __snake_case = INVOICE_URL __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) __snake_case = '''What is the placebo?''' __snake_case = [ { '''image''': load_image(a__ ), '''question''': question, }, { '''image''': image, '''question''': question, }, { '''image''': image, '''question''': question, '''word_boxes''': word_boxes, }, ] return dqa_pipeline, examples def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ): """simple docstring""" __snake_case = dqa_pipeline(a__ , top_k=2 ) self.assertEqual( a__ , [ [ {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def a (self : Dict ): """simple docstring""" __snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' ) __snake_case = INVOICE_URL __snake_case = '''How many cats are there?''' __snake_case = [ {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39}, {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40}, ] __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(a__ , [] ) # We can optionnally pass directly the words and bounding boxes __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = [] __snake_case = [] __snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 ) self.assertEqual(a__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : str ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : List[Any] ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Tuple ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , 
revision='''3dc6de3''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Dict ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) @slow @require_torch def a (self : Tuple ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , 
feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] ) @require_tf @unittest.skip('''Document question answering not implemented in TF''' ) def a (self : List[str] ): """simple docstring""" pass
24
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 snake_case_ = get_tests_dir('fixtures') class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : List[str] ): """simple docstring""" __snake_case = mock.Mock() __snake_case = 500 __snake_case = {} __snake_case = HTTPError __snake_case = {} # Download this model to make sure it's in the cache. __snake_case = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=a__ ) as mock_head: __snake_case = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # This checks that we called the fake head request mock_head.assert_called() def a (self : Any ): """simple docstring""" __snake_case = WavaVecaFeatureExtractor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' ) @is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @classmethod def a (cls : List[Any] ): """simple docstring""" __snake_case = TOKEN HfFolder.save_token(a__ ) @classmethod def a (cls : Optional[int] ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-feature-extractor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' ) except HTTPError: pass def a (self : Optional[int] ): """simple docstring""" __snake_case = WavaVecaFeatureExtractor.from_pretrained(a__ ) feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a__ , getattr(a__ , a__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a__ , repo_id='''test-feature-extractor''' , push_to_hub=a__ , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a__ , getattr(a__ , a__ ) ) def a (self : str ): """simple docstring""" __snake_case = WavaVecaFeatureExtractor.from_pretrained(a__ ) feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a__ , getattr(a__ , a__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a__ , repo_id='''valid_org/test-feature-extractor-org''' ,
push_to_hub=a__ , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a__ , getattr(a__ , a__ ) ) def a (self : Any ): """simple docstring""" CustomFeatureExtractor.register_for_auto_class() __snake_case = CustomFeatureExtractor.from_pretrained(a__ ) feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , ) __snake_case = AutoFeatureExtractor.from_pretrained( f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=a__ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
24
from __future__ import annotations def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int ) -> list[list[int]]: __snake_case = [] __snake_case = [] __snake_case = 0 __snake_case = sum(snake_case_ ) create_state_space_tree(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) return result def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , snake_case_ : list[int] , snake_case_ : list[list[int]] , snake_case_ : int , ) -> None: if sum(snake_case_ ) > max_sum or (remaining_nums_sum + sum(snake_case_ )) < max_sum: return if sum(snake_case_ ) == max_sum: result.append(snake_case_ ) return for index in range(snake_case_ , len(snake_case_ ) ): create_state_space_tree( snake_case_ , snake_case_ , index + 1 , [*path, nums[index]] , snake_case_ , remaining_nums_sum - nums[index] , ) snake_case_ = [3, 34, 4, 12, 5, 2] snake_case_ = 9 snake_case_ = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
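A quick worked example of the backtracking above (illustrative only, not part of the dataset row): with nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the early return prunes any branch whose running sum already exceeds 9, or whose running sum plus all numbers still available can no longer reach 9. The only subsets that sum to 9 are [3, 4, 2] and [4, 5], so the driver at the bottom of this sample should print: [3, 4, 2] [4, 5]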
24
1
from collections import deque from math import floor from random import random from time import time class SCREAMING_SNAKE_CASE__ : def __init__(self : Any ): """simple docstring""" __snake_case = {} def a (self : List[str] , a__ : str , a__ : Union[str, Any] , a__ : Optional[int]=1 ): """simple docstring""" if self.graph.get(a__ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __snake_case = [[w, v]] if not self.graph.get(a__ ): __snake_case = [] def a (self : Any ): """simple docstring""" return list(self.graph ) def a (self : Tuple , a__ : Any , a__ : Any ): """simple docstring""" if self.graph.get(a__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(a__ ) def a (self : Any , a__ : List[Any]=-2 , a__ : Optional[int]=-1 ): """simple docstring""" if s == d: return [] __snake_case = [] __snake_case = [] if s == -2: __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = s while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(a__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return visited def a (self : List[str] , a__ : Dict=-1 ): """simple docstring""" if c == -1: __snake_case = floor(random() * 1_0000 ) + 10 for i in range(a__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __snake_case = floor(random() * c ) + 1 if n != i: self.add_pair(a__ , a__ , 1 ) def a (self : Dict , a__ : List[Any]=-2 ): """simple docstring""" __snake_case = deque() __snake_case = [] if s == -2: __snake_case = list(self.graph )[0] d.append(a__ ) visited.append(a__ ) while d: __snake_case = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a (self : List[Any] , a__ : Dict ): """simple docstring""" __snake_case = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def a (self : Tuple , a__ : Tuple ): """simple docstring""" return len(self.graph[u] ) def a (self : Optional[int] , a__ : Dict=-2 ): """simple docstring""" __snake_case = [] __snake_case = [] if s == -2: __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = s __snake_case = [] while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return sorted_nodes def a (self : List[Any] ): """simple docstring""" __snake_case = [] __snake_case = [] __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = -2 __snake_case = [] __snake_case = s __snake_case = False __snake_case = set() while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1]
!= parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case = len(a__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case = True if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = False indirect_parents.append(a__ ) __snake_case = s __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return list(a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = [] __snake_case = [] __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = -2 __snake_case = [] __snake_case = s __snake_case = False __snake_case = set() while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case = len(a__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case = True if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = False indirect_parents.append(a__ ) __snake_case = s __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return False def a (self : Optional[Any] , a__ : Tuple=-2 , a__ : List[Any]=-1 ): """simple docstring""" __snake_case = time() self.dfs(a__ , a__ ) __snake_case = time() return end - begin def a (self : int , a__ : Union[str, Any]=-2 ): """simple docstring""" __snake_case = time() self.bfs(a__ ) __snake_case = time() return end - begin class SCREAMING_SNAKE_CASE__ : def __init__(self : List[str] ): """simple docstring""" __snake_case = {} def a (self : Dict , a__ : List[str] , a__ : Optional[Any] , a__ : Dict=1 ): """simple docstring""" if self.graph.get(a__ ): # if there already is an edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __snake_case = [[w, v]] # add the other way if self.graph.get(a__ ): # if there already is an edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if v does not exist __snake_case = [[w, u]] def a (self : Dict , a__ : Optional[int] , a__ : Optional[Any] ): """simple docstring""" if self.graph.get(a__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(a__ ) # the other way round if self.graph.get(a__ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(a__ ) def a (self : Union[str, Any] , a__ : int=-2 , a__ : Union[str, Any]=-1 ): """simple docstring""" if s == d: return [] __snake_case = [] __snake_case = [] if s == -2: __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = s while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(a__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are
visited if s == ss: stack.pop() if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return visited def a (self : List[Any] , a__ : Tuple=-1 ): """simple docstring""" if c == -1: __snake_case = floor(random() * 1_0000 ) + 10 for i in range(a__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __snake_case = floor(random() * c ) + 1 if n != i: self.add_pair(a__ , a__ , 1 ) def a (self : int , a__ : Union[str, Any]=-2 ): """simple docstring""" __snake_case = deque() __snake_case = [] if s == -2: __snake_case = list(self.graph )[0] d.append(a__ ) visited.append(a__ ) while d: __snake_case = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a (self : int , a__ : Dict ): """simple docstring""" return len(self.graph[u] ) def a (self : List[str] ): """simple docstring""" __snake_case = [] __snake_case = [] __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = -2 __snake_case = [] __snake_case = s __snake_case = False __snake_case = set() while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case = len(a__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case = True if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = False indirect_parents.append(a__ ) __snake_case = s __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return list(a__ ) def a (self : Any ): """simple docstring""" __snake_case = [] __snake_case = [] __snake_case = list(self.graph )[0] stack.append(a__ ) visited.append(a__ ) __snake_case = -2 __snake_case = [] __snake_case = s __snake_case = False __snake_case = set() while True: # check if there are any non-isolated nodes if len(self.graph[s] ) != 0: __snake_case = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __snake_case = len(a__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __snake_case = node[1] break # check if all the children are visited if s == ss: stack.pop() __snake_case = True if len(a__ ) != 0: __snake_case = stack[len(a__ ) - 1] else: __snake_case = False indirect_parents.append(a__ ) __snake_case = s __snake_case = ss # check if we have reached the starting point if len(a__ ) == 0: return False def a (self : Optional[int] ): """simple docstring""" return list(self.graph ) def a (self : Any , a__ : List[str]=-2 , a__ : List[str]=-1 ): """simple docstring""" __snake_case = time() self.dfs(a__ , a__ ) __snake_case = time() return end - begin def a (self : List[str] , a__ : int=-2 ): """simple docstring""" __snake_case = time() self.bfs(a__ ) __snake_case = time() return end - begin
24
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_labels __snake_case = initializer_range __snake_case = out_features __snake_case = out_indices __snake_case = scope def a (self : Dict ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ): """simple docstring""" __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case = None __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A_ : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A_ : Dict = True A_ : Optional[Any] = False A_ : int = False A_ : int = False A_ : List[str] = False def a (self : List[str] ): """simple docstring""" __snake_case = ConvNextModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Tuple ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : str ): """simple docstring""" return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Optional[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def a (self : Dict ): """simple docstring""" def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a (self : Any ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = ConvNextModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> List[str]: __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def a (self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def a (self : Optional[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __snake_case = model(**a__ ) # verify the logits __snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ): A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A_ : List[Any] = ConvNextConfig A_ : Optional[Any] = False def a (self : Optional[int] ): """simple docstring""" __snake_case = ConvNextModelTester(self )
24
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) snake_case_ = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ['BeitFeatureExtractor'] snake_case_ = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
24
def lowerCamelCase__ ( ) -> int: return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(snake_case_ , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
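For context, the comprehension above brute-forces Project Euler's special Pythagorean triplet: it fixes a < b, takes c = 1000 - a - b from the sum constraint, and keeps a * b * c whenever a^2 + b^2 = c^2. The unique triplet is (200, 375, 425), since 200^2 + 375^2 = 40000 + 140625 = 180625 = 425^2, so solution() returns 200 * 375 * 425 = 31875000.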
24
1
from manim import * class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : List[str] ): """simple docstring""" __snake_case = Rectangle(height=0.5 , width=0.5 ) __snake_case = Rectangle(height=0.2_5 , width=0.2_5 ) __snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) __snake_case = Text('''CPU''' , font_size=24 ) __snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a__ ) __snake_case = [mem.copy() for i in range(4 )] __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = Text('''GPU''' , font_size=24 ) __snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) gpu.move_to([-1, -1, 0] ) self.add(a__ ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = Text('''Model''' , font_size=24 ) __snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) model.move_to([3, -1.0, 0] ) self.add(a__ ) __snake_case = [] __snake_case = [] __snake_case = [] for i, rect in enumerate(a__ ): rect.set_stroke(a__ ) __snake_case = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0 ) self.add(a__ ) model_cpu_arr.append(a__ ) self.add(*a__ , *a__ , *a__ ) __snake_case = [mem.copy() for i in range(6 )] __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = Text('''Loaded Checkpoint''' , font_size=24 ) __snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(a__ ) __snake_case = [] __snake_case = [] for i, rect in enumerate(a__ ): __snake_case = fill.copy().set_fill(a__ , opacity=0.7 ) target.move_to(a__ ) ckpt_arr.append(a__ ) __snake_case = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(a__ ) self.add(*a__ , *a__ ) __snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __snake_case = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a__ , a__ ) __snake_case = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(a__ ) __snake_case = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) __snake_case = [meta_mem.copy() for i in range(6 )] __snake_case = [meta_mem.copy() for i in range(6 )] __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = VGroup(*a__ ).arrange(a__ , buff=0 ) __snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) __snake_case = Text('''Disk''' , font_size=24 ) __snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(a__ , 
run_time=3 ) , Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) ) __snake_case = [] for i, rect in enumerate(a__ ): __snake_case = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(a__ , run_time=1.5 ) ) self.play(*a__ ) self.play(FadeOut(a__ ) ) __snake_case = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ , run_time=3 ) ) self.play( FadeOut(a__ , a__ , *a__ , *a__ ) , ) self.wait()
24
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = BartphoTokenizer A_ : List[str] = False A_ : Optional[Any] = True def a (self : Tuple ): """simple docstring""" super().setUp() __snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : str , **a__ : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : str , a__ : Any ): """simple docstring""" __snake_case = '''This is a là test''' __snake_case = '''This is a<unk><unk> test''' return input_text, output_text def a (self : Dict ): """simple docstring""" __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case = '''This is a là test''' __snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split() __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
24
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : List[Any] , a__ : str , a__ : str=13 , a__ : Any=32 , a__ : List[Any]=3 , a__ : Optional[int]=4 , a__ : Any=[10, 20, 30, 40] , a__ : Any=[2, 2, 3, 2] , a__ : Tuple=True , a__ : int=True , a__ : str=37 , a__ : List[str]="gelu" , a__ : Union[str, Any]=10 , a__ : List[Any]=0.0_2 , a__ : Union[str, Any]=["stage2", "stage3", "stage4"] , a__ : Optional[int]=3 , a__ : Dict=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = out_features __snake_case = num_labels __snake_case = scope __snake_case = num_stages def a (self : Optional[Any] ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : Any ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def a (self : Optional[Any] ): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=a__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=a__ , loss_ignore_index=255 , num_labels=self.num_labels , ) def a (self : Tuple , a__ : int , a__ : Optional[int] , a__ : Tuple ): """simple docstring""" __snake_case = UperNetForSemanticSegmentation(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : int = (UperNetForSemanticSegmentation,) if is_torch_available() else () A_ : List[str] = 
{'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} A_ : int = False A_ : int = False A_ : Tuple = False A_ : Any = False A_ : Optional[Any] = False A_ : Optional[Any] = False def a (self : Dict ): """simple docstring""" __snake_case = UperNetModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Dict ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : int ): """simple docstring""" return def a (self : List[str] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a__ ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''UperNet does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''UperNet does not have a base model''' ) def a (self : str ): """simple docstring""" pass @unittest.skip(reason='''UperNet does not have a base model''' ) def a (self : str ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a (self : Optional[int] ): """simple docstring""" pass def a (self : Any ): """simple docstring""" def check_hidden_states_output(a__ : List[Any] , a__ : List[str] , a__ : int ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Dict ): """simple docstring""" __snake_case , __snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() __snake_case = _config_zero_init(a__ ) __snake_case = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: __snake_case = model_class(config=a__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @slow def a (self : int ): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = UperNetForSemanticSegmentation.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> int: __snake_case = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) __snake_case = Image.open(snake_case_ ).convert('''RGB''' ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[int] ): """simple docstring""" __snake_case = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) __snake_case = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(a__ ) __snake_case = prepare_img() __snake_case = processor(images=a__ , return_tensors='''pt''' ).to(a__ ) with torch.no_grad(): __snake_case = model(**a__ ) __snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a__ , atol=1E-4 ) ) def a (self : Tuple ): """simple docstring""" __snake_case = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) __snake_case = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(a__ ) __snake_case = prepare_img() __snake_case = processor(images=a__ , return_tensors='''pt''' ).to(a__ ) with torch.no_grad(): __snake_case = model(**a__ ) __snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a__ , atol=1E-4 ) )
24
def lowerCamelCase__ ( snake_case_ : int ) -> int: if not isinstance(snake_case_ , snake_case_ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) __snake_case = 0 while number: # This way we arrive at the next set bit (the next 1) instead of looping # through each bit and checking for 1s; hence the # loop won't run 32 times, it will only run once per `1` bit number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
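A short trace of Brian Kernighan's trick used above (illustrative only): each `number &= number - 1` clears exactly the lowest set bit, so the loop runs once per set bit. For number = 13 (0b1101): 13 & 12 = 12 (0b1100), 12 & 11 = 8 (0b1000), 8 & 7 = 0, giving count = 3, which matches the three set bits of 13.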
24
1
class SCREAMING_SNAKE_CASE__ : def __init__(self : Union[str, Any] , a__ : int ): """simple docstring""" __snake_case = n __snake_case = [None] * self.n __snake_case = 0 # index of the first element __snake_case = 0 __snake_case = 0 def __len__(self : List[str] ): """simple docstring""" return self.size def a (self : str ): """simple docstring""" return self.size == 0 def a (self : Dict ): """simple docstring""" return False if self.is_empty() else self.array[self.front] def a (self : Optional[Any] , a__ : List[Any] ): """simple docstring""" if self.size >= self.n: raise Exception('''QUEUE IS FULL''' ) __snake_case = data __snake_case = (self.rear + 1) % self.n self.size += 1 return self def a (self : str ): """simple docstring""" if self.size == 0: raise Exception('''UNDERFLOW''' ) __snake_case = self.array[self.front] __snake_case = None __snake_case = (self.front + 1) % self.n self.size -= 1 return temp
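Because the obfuscation in this row collapses every method to the name `a`, the class as printed is hard to read as an API; a minimal de-obfuscated sketch of the same fixed-size ring-buffer technique, with hypothetical names chosen here for reading convenience only:

class RingQueue:
    def __init__(self, n: int):
        self.n = n                # fixed capacity
        self.array = [None] * n   # backing storage
        self.front = 0            # index of the first element
        self.rear = 0             # index one past the last element
        self.size = 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n  # wrap around the buffer
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n  # wrap around the buffer
        self.size -= 1
        return temp

With n = 2, enqueue(1).enqueue(2) fills the buffer, dequeue() returns 1, and a further enqueue(3) lands in the slot freed by the wraparound.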
24
from math import loga def lowerCamelCase__ ( snake_case_ : int ) -> int: if a < 0: raise ValueError('''Input value must be a non-negative integer''' ) elif isinstance(snake_case_ , snake_case_ ): raise TypeError('''Input value must be an \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
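The `a & -a` expression in the return isolates the lowest set bit via two's complement, and the base-2 log of that power of two yields its index (equivalently, the number of trailing zeros). Worked example, illustrative only: for a = 36 = 0b100100, 36 & -36 = 4 = 0b100, and log2(4) = 2, so the function returns 2.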
24
1
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : Any = 'lxmert' A_ : List[Any] = {} def __init__(self : int , a__ : Any=3_0522 , a__ : Optional[int]=768 , a__ : Dict=12 , a__ : int=9500 , a__ : Dict=1600 , a__ : Any=400 , a__ : List[str]=3072 , a__ : List[str]="gelu" , a__ : int=0.1 , a__ : Dict=0.1 , a__ : str=512 , a__ : Any=2 , a__ : Any=0.0_2 , a__ : Union[str, Any]=1E-12 , a__ : str=9 , a__ : Optional[Any]=5 , a__ : int=5 , a__ : Optional[int]=2048 , a__ : Union[str, Any]=4 , a__ : Any=6.6_7 , a__ : List[Any]=True , a__ : str=True , a__ : Optional[Any]=True , a__ : Dict=True , a__ : Dict=True , a__ : int=True , a__ : Union[str, Any]=True , **a__ : List[Any] , ): """simple docstring""" __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = num_qa_labels __snake_case = num_object_labels __snake_case = num_attr_labels __snake_case = l_layers __snake_case = x_layers __snake_case = r_layers __snake_case = visual_feat_dim __snake_case = visual_pos_dim __snake_case = visual_loss_normalizer __snake_case = task_matched __snake_case = task_mask_lm __snake_case = task_obj_predict __snake_case = task_qa __snake_case = visual_obj_loss __snake_case = visual_attr_loss __snake_case = visual_feat_loss __snake_case = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a__ )
24
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING snake_case_ = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ): """simple docstring""" super().__init__(*a__ , **a__ ) requires_backends(self , '''vision''' ) self.check_model_type(a__ ) def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ): """simple docstring""" return super().__call__(a__ , **a__ ) def a (self : Dict , **a__ : Any ): """simple docstring""" return {}, {}, {} def a (self : List[str] , a__ : Any ): """simple docstring""" __snake_case = load_image(a__ ) __snake_case = image.size __snake_case = self.image_processor(images=a__ , return_tensors=self.framework ) return model_inputs def a (self : int , a__ : List[Any] ): """simple docstring""" __snake_case = self.model(**a__ ) return model_outputs def a (self : int , a__ : str ): """simple docstring""" __snake_case = model_outputs.predicted_depth __snake_case = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ ) __snake_case = prediction.squeeze().cpu().numpy() __snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' ) __snake_case = Image.fromarray(a__ ) __snake_case = {} __snake_case = predicted_depth __snake_case = depth return output_dict
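For context, a minimal usage sketch of the pipeline class above. The output keys follow the postprocess step, whose variables suggest the dict holds the raw tensor under 'predicted_depth' and the rendered PIL image under 'depth'; the checkpoint resolution and the image path are illustrative placeholders, not taken from this row:

from transformers import pipeline

depth_estimator = pipeline('depth-estimation')  # lets transformers resolve its default checkpoint
outputs = depth_estimator('example.jpg')        # 'example.jpg' is a placeholder local path
print(outputs['predicted_depth'].shape)         # raw predicted-depth tensor
outputs['depth'].save('example_depth.png')      # uint8 PIL image built in postprocess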
24
1
import math snake_case_ = 10 snake_case_ = 7 snake_case_ = BALLS_PER_COLOUR * NUM_COLOURS def lowerCamelCase__ ( snake_case_ : int = 20 ) -> str: __snake_case = math.comb(snake_case_ , snake_case_ ) __snake_case = math.comb(NUM_BALLS - BALLS_PER_COLOUR , snake_case_ ) __snake_case = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
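The closed form this sample implements, for the record: by linearity of expectation, the expected number of distinct colours among 20 balls drawn from 70 (7 colours, 10 balls each) is 7 * (1 - C(60, 20) / C(70, 20)), because a fixed colour is missing exactly when all 20 balls come from the other 60. `total` and `missing_colour` above are those two binomial coefficients, and the result is formatted to the nine decimal places the problem asks for.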
24
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> Any: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __snake_case = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> Any: assert _test_patching.open is open __snake_case = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , snake_case_ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> List[str]: # pandas.read_csv is not present in _test_patching __snake_case = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ): pass def lowerCamelCase__ ( ) -> Union[str, Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __snake_case = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , snake_case_ ) is None with patch_submodule(_test_patching , '''len''' , snake_case_ ): assert _test_patching.len is mock assert _test_patching.len is len def 
lowerCamelCase__ ( ) -> Union[str, Any]: __snake_case = '''__test_patch_submodule_start_and_stop_mock__''' __snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __snake_case = '''__test_patch_submodule_successive_join__''' __snake_case = '''__test_patch_submodule_successive_dirname__''' __snake_case = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> Tuple: __snake_case = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ): pass
24
1
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def __init__(self : str , a__ : Union[str, "sqlalchemy.sql.Selectable"] , a__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , a__ : Optional[Features] = None , a__ : str = None , a__ : bool = False , **a__ : Dict , ): """simple docstring""" super().__init__(features=a__ , cache_dir=a__ , keep_in_memory=a__ , **a__ ) __snake_case = Sql( cache_dir=a__ , features=a__ , sql=a__ , con=a__ , **a__ , ) def a (self : int ): """simple docstring""" __snake_case = None __snake_case = None __snake_case = None __snake_case = None self.builder.download_and_prepare( download_config=a__ , download_mode=a__ , verification_mode=a__ , base_path=a__ , ) # Build dataset for splits __snake_case = self.builder.as_dataset( split='''train''' , verification_mode=a__ , in_memory=self.keep_in_memory ) return dataset class SCREAMING_SNAKE_CASE__ : def __init__(self : int , a__ : Dataset , a__ : str , a__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , a__ : Optional[int] = None , a__ : Optional[int] = None , **a__ : str , ): """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case = dataset __snake_case = name __snake_case = con __snake_case = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case = num_proc __snake_case = to_sql_kwargs def a (self : List[str] ): """simple docstring""" __snake_case = self.to_sql_kwargs.pop('''sql''' , a__ ) __snake_case = self.to_sql_kwargs.pop('''con''' , a__ ) __snake_case = self.to_sql_kwargs.pop('''index''' , a__ ) __snake_case = self._write(index=a__ , **self.to_sql_kwargs ) return written def a (self : Optional[Any] , a__ : List[str] ): """simple docstring""" __snake_case , __snake_case , __snake_case = args __snake_case = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case = query_table( table=self.dataset.data , key=slice(a__ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case = batch.to_pandas() __snake_case = df.to_sql(self.name , self.con , index=a__ , **a__ ) return num_rows or len(a__ ) def a (self : Union[str, Any] , a__ : Dict , **a__ : Union[str, Any] ): """simple docstring""" __snake_case = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a__ , a__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
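A brief usage sketch of how this reader/writer pair surfaces on datasets objects, assuming sqlalchemy is installed; the table name and SQLite connection string are illustrative placeholders:

from datasets import Dataset

ds = Dataset.from_dict({'id': [1, 2], 'text': ['a', 'b']})
ds.to_sql('my_table', 'sqlite:///example.db')                        # routed through the writer class above
ds_roundtrip = Dataset.from_sql('my_table', 'sqlite:///example.db')  # routed through the reader class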
24
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) A_ : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) A_ : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) ) def lowerCamelCase__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): __snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case = SeqaSeqDataset # Get datasets __snake_case = ( dataset_class( snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) __snake_case = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) __snake_case = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __snake_case = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case = train_result.metrics __snake_case = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate(metric_key_prefix='''val''' ) __snake_case = data_args.n_val __snake_case = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' ) __snake_case = test_output.metrics __snake_case = data_args.n_test if trainer.is_world_process_zero(): __snake_case = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: __snake_case = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) __snake_case = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
24
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
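# Minimal sketch of what the lazy structure buys (assumes the package is installed
# with torch available): the torch-backed modeling module is only imported when one
# of its attributes is first accessed through the lazy module.
from transformers import LukeConfig  # resolved via _LazyModule on first access

config = LukeConfig()  # configuration_luke is imported only at this point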
24
from math import pi


def arc_length(angle: int, radius: int) -> float:
    # Length of a circular arc from its central angle (in degrees) and radius
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
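# Sanity check: a 90-degree arc of radius 10 is a quarter circle, so its length is
# (1/4) * 2*pi*10 = 5*pi.
from math import isclose, pi

assert isclose(arc_length(90, 10), 5 * pi)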
24
1
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = scope __snake_case = range_bbox def a (self : Optional[int] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case = bbox[i, j, 3] __snake_case = bbox[i, j, 1] __snake_case = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case = bbox[i, j, 2] __snake_case = bbox[i, j, 0] __snake_case = t __snake_case = None if self.use_input_mask: __snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def a (self : List[str] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a (self : List[Any] , a__ : 
List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ): """simple docstring""" __snake_case = LiltModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ): """simple docstring""" __snake_case = self.num_labels __snake_case = LiltForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ): """simple docstring""" __snake_case = LiltForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) A_ : Any = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) A_ : Optional[int] = False A_ : List[Any] = False def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" return True def a (self : Union[str, Any] ): """simple docstring""" __snake_case = LiltModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) @slow def a (self : Optional[int] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = LiltModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Tuple ): """simple docstring""" __snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ ) __snake_case = torch.tensor([[1, 2]] , device=a__ ) __snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ ) # forward pass with torch.no_grad(): __snake_case = model(input_ids=a__ , bbox=a__ ) __snake_case = torch.Size([1, 2, 768] ) __snake_case = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , ) self.assertTrue(outputs.last_hidden_state.shape , a__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
24
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
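# Usage sketch: the defaults reproduce the base ViT-MSN configuration; any field can
# be overridden at construction time.
config = ViTMSNConfig(image_size=192)
assert config.hidden_size == 768 and config.image_size == 192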
24
1
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
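# Sanity check: for a straight line the piecewise-linear approximation is exact, so
# f(x) = x over [0, 1] must give length sqrt(2) even with a single step.
import math

assert math.isclose(line_length(lambda x: x, 0, 1, steps=1), math.sqrt(2))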
24
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def a (self : int , a__ : List[Any]=0 ): """simple docstring""" __snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) ) __snake_case = np.random.RandomState(a__ ) __snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) # warmup pass to apply optimizations __snake_case = pipe(**self.get_dummy_inputs() ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Any ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = 
np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : List[str] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = ort.SessionOptions() __snake_case = False return options def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) # using the PNDM scheduler by default __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , 
revision='''onnx''' ) __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
24
1
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal snake_case_ = logging.get_logger(__name__) snake_case_ = TypeVar('DatasetType', Dataset, IterableDataset) def lowerCamelCase__ ( snake_case_ : List[DatasetType] , snake_case_ : Optional[List[float]] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[DatasetInfo] = None , snake_case_ : Optional[NamedSplit] = None , snake_case_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType: from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(snake_case_ ): if not isinstance(snake_case_ , (Dataset, IterableDataset) ): if isinstance(snake_case_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ '''is an empty dataset dictionary.''' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(snake_case_ )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(snake_case_ ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case_ ).__name__}.""" ) if i == 0: __snake_case , __snake_case = ( (Dataset, IterableDataset) if isinstance(snake_case_ , snake_case_ ) else (IterableDataset, Dataset) ) elif not isinstance(snake_case_ , snake_case_ ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"""{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( snake_case_ , snake_case_ , snake_case_ , info=snake_case_ , split=snake_case_ , stopping_strategy=snake_case_ ) else: return _interleave_iterable_datasets( snake_case_ , snake_case_ , snake_case_ , info=snake_case_ , split=snake_case_ , stopping_strategy=snake_case_ ) def lowerCamelCase__ ( snake_case_ : List[DatasetType] , snake_case_ : Optional[DatasetInfo] = None , snake_case_ : Optional[NamedSplit] = None , snake_case_ : int = 0 , ) -> DatasetType: if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(snake_case_ ): if not isinstance(snake_case_ , (Dataset, IterableDataset) ): if isinstance(snake_case_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ '''is an empty dataset dictionary.''' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(snake_case_ )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(snake_case_ ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case_ ).__name__}.""" ) if i == 0: __snake_case , __snake_case = ( (Dataset, IterableDataset) if isinstance(snake_case_ , snake_case_ ) else (IterableDataset, Dataset) ) elif not isinstance(snake_case_ , snake_case_ ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(snake_case_ , info=snake_case_ , split=snake_case_ , axis=snake_case_ ) else: return _concatenate_iterable_datasets(snake_case_ , info=snake_case_ , split=snake_case_ , axis=snake_case_ )
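# Usage sketch of the public API these two helpers back (`interleave_datasets` and
# `concatenate_datasets` in `datasets`); the dataset contents here are made up:
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11]})

combined = concatenate_datasets([d1, d2])  # 5 rows, d1's rows followed by d2's
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)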
24
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
24
1
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class SCREAMING_SNAKE_CASE__ : def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ): """simple docstring""" __snake_case = parent __snake_case = out_indices if out_indices is not None else [4] __snake_case = stage_names __snake_case = out_features __snake_case = backbone __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = use_pretrained_backbone __snake_case = is_training def a (self : Union[str, Any] ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = self.get_config() return config, pixel_values def a (self : Any ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def a (self : List[Any] , a__ : int , a__ : int ): """simple docstring""" __snake_case = TimmBackbone(config=a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(a__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def a (self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else () A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : List[Any] = False A_ : Dict = False A_ : Any = False A_ : List[Any] = False def a (self : Tuple ): """simple docstring""" __snake_case = TimmBackboneModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ ) def a (self : Any ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : int ): """simple docstring""" __snake_case = '''resnet18''' __snake_case = '''microsoft/resnet-18''' __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ ) __snake_case = AutoBackbone.from_pretrained(a__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) 
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] ) __snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def a (self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : Optional[int] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a (self : Tuple ): """simple docstring""" pass def a (self : Tuple ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True __snake_case = 
self.has_attentions # no need to test all models as different heads yield the same functionality __snake_case = self.all_model_classes[0] __snake_case = model_class(a__ ) model.to(a__ ) __snake_case = self._prepare_for_class(a__ , a__ ) __snake_case = model(**a__ ) __snake_case = outputs[0][-1] # Encoder-/Decoder-only models __snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=a__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def a (self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __snake_case = copy.deepcopy(a__ ) __snake_case = None __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __snake_case = copy.deepcopy(a__ ) __snake_case = False __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ )
24
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
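# Usage sketch; "BAAI/AltCLIP" is assumed to be a checkpoint that pairs a CLIP image
# processor with an XLM-R tokenizer, matching the classes declared above:
import numpy as np
from PIL import Image

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo"], images=image, return_tensors="pt")
# `inputs` carries input_ids/attention_mask from the tokenizer plus pixel_values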
24
1
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
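# Worked example: 1 kWh = 3_600_000 J, so converting in either direction is exact
# with these factors.
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "kilowatthour", 3_600_000) == 1.0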
24
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingUtilTests(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        self.root = None  # stays None for an empty collection
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        # breadth-first traversal of the tree nodes
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
def topological_sort(graph):
    # Kahn's algorithm: repeatedly remove vertices with indegree 0
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = (
    "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
)

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = (
    "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
)

MULTIPLE_EXCEPTS_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
)

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
)

MULTILINE_BOTH_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
)

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math


def is_prime(number: int) -> bool:
    # trial division by odd numbers up to sqrt(number)
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune branches that overshoot max_sum or can no longer reach it
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
24
1
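A quick sanity check for the subset-sum backtracking above, as a sketch that reuses the names restored in the snippet; the expected output is worked out by hand.

# Usage sketch for generate_sum_of_subsets_soln (defined above).
subsets = generate_sum_of_subsets_soln(nums=[2, 4, 6, 10], max_sum=16)
print(subsets)  # [[2, 4, 10], [6, 10]]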
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Exactly one of the three quantities must be passed as 0; it is the one
    # that gets solved for via V = I * R.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_labels __snake_case = initializer_range __snake_case = out_features __snake_case = out_indices __snake_case = scope def a (self : Dict ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ): """simple docstring""" __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case = None __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A_ : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A_ : Dict = True A_ : Optional[Any] = False A_ : int = False A_ : int = False A_ : List[str] = False def a (self : List[str] ): """simple docstring""" __snake_case = ConvNextModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Tuple ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : str ): """simple docstring""" return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Optional[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def a (self : Dict ): """simple docstring""" def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a (self : Any ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = ConvNextModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> List[str]: __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def a (self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def a (self : Optional[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __snake_case = model(**a__ ) # verify the logits __snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ): A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A_ : List[Any] = ConvNextConfig A_ : Optional[Any] = False def a (self : Optional[int] ): """simple docstring""" __snake_case = ConvNextModelTester(self )
24
1
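A short usage sketch for the ohms_law helper above: the quantity passed as 0 is the one that gets solved for.

# Each call leaves exactly one quantity at 0 and returns its solved value.
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}
print(ohms_law(voltage=12, current=4, resistance=0))  # {'resistance': 3.0}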
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = GPTaTokenizer A_ : List[Any] = GPTaTokenizerFast A_ : Dict = True A_ : List[str] = {'add_prefix_space': True} A_ : List[str] = False def a (self : Any ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a__ ) ) def a (self : List[str] , **a__ : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : Dict , **a__ : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ ) def a (self : int , a__ : Optional[int] ): """simple docstring""" __snake_case = '''lower newer''' __snake_case = '''lower newer''' return input_text, output_text def a (self : Optional[int] ): """simple docstring""" __snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case = '''lower newer''' __snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def a (self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __snake_case = self.get_tokenizer() __snake_case = self.get_rust_tokenizer(add_prefix_space=a__ ) __snake_case = '''lower newer''' # Testing tokenization __snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids without special tokens __snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids with special tokens __snake_case = self.get_rust_tokenizer(add_prefix_space=a__ ) __snake_case = tokenizer.encode(a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) # Testing the unknown token __snake_case = tokens + [rust_tokenizer.unk_token] 
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def a (self : Tuple , *a__ : Optional[int] , **a__ : Optional[Any] ): """simple docstring""" pass def a (self : List[str] , a__ : Any=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) # Simple input __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case = ('''This is a simple input''', '''This is a pair''') __snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' ) # Simple input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' ) # Simple input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , ) # Pair input self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' ) # Pair input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' ) # Pair input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , ) def a (self : List[str] ): """simple docstring""" __snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] __snake_case = ('''This is a simple input''', '''This is a pair''') __snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] __snake_case = tokenizer.pad_token_id __snake_case = tokenizer(a__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) __snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' ) __snake_case = tokenizer(*a__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) __snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does 
have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = '''$$$''' __snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ ) __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case = tokenizer.bos_token_id __snake_case = tokenizer(a__ ) __snake_case = tokenizer(a__ ) self.assertEqual(out_s.input_ids[0] , a__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __snake_case = tokenizer.decode(out_s.input_ids ) __snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Dict ): """simple docstring""" __snake_case = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case = '''Encode this.''' __snake_case = '''This one too please.''' __snake_case = tokenizer.encode(a__ , add_special_tokens=a__ ) encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ ) __snake_case = tokenizer.encode_plus( a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , ) __snake_case = encoded_sequence_dict['''input_ids'''] __snake_case = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(a__ ) , len(a__ ) ) __snake_case = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ ) ] __snake_case = [x for x in filtered_sequence if x is not None] self.assertEqual(a__ , a__ ) @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[int] ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ ) __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('''test_opt''' ) __snake_case = AutoTokenizer.from_pretrained('''./test_opt''' ) __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) def a (self : List[str] ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=a__ ) __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) # Same as above self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def a (self : Tuple ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ ) __snake_case = '''bos''' __snake_case = tokenizer.get_vocab()['''bos'''] __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) # We changed the bos token self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('''./tok''' ) __snake_case = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] )
24
def solution() -> int:
    # Project Euler 9: there is exactly one Pythagorean triplet (a, b, c)
    # with a + b + c == 1000; return the product a * b * c.
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
24
1
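A hand-checked verification of the Project Euler 9 brute force above: the unique Pythagorean triplet summing to 1000 is (200, 375, 425).

# 200^2 + 375^2 = 40000 + 140625 = 180625 = 425^2, and 200 + 375 + 425 = 1000.
a, b, c = 200, 375, 425
assert a + b + c == 1000 and a * a + b * b == c * c
assert solution() == a * b * c  # 31875000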
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : List[Any] , a__ : Union[str, Any] ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a__ , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = '''sgugger/tiny-distilbert-classification''' __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , only_pretrain_model=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : Tuple ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : Dict ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = AutoConfig.from_pretrained(a__ ) __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a__ , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ , [config] ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : str ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = AutoConfig.from_pretrained(a__ ) __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ , [config] ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : Dict ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def a (self : Tuple ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = AutoConfig.from_pretrained(a__ ) __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ , [config] ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = '''patrickvonplaten/t5-tiny-random''' __snake_case = AutoConfig.from_pretrained(a__ ) __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ , configs=[config] ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def a (self : Any ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a__ , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a (self : Optional[int] ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a__ , save_to_csv=a__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a__ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a__ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a__ , '''env.csv''' ) , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) benchmark.run() self.assertTrue(Path(os.path.join(a__ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a__ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a__ , '''env.csv''' ) ).exists() ) def a (self : Any ): """simple docstring""" __snake_case = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a__ : List[str] ): self.assertTrue(hasattr(a__ , '''sequential''' ) ) self.assertTrue(hasattr(a__ , '''cumulative''' ) ) self.assertTrue(hasattr(a__ , '''current''' ) ) self.assertTrue(hasattr(a__ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a__ , '''log.txt''' ) , log_print=a__ , trace_memory_line_by_line=a__ , eager_mode=a__ , multi_process=a__ , ) __snake_case = TensorFlowBenchmark(a__ ) __snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a__ , '''log.txt''' ) ).exists() )
24
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = BartphoTokenizer A_ : List[str] = False A_ : Optional[Any] = True def a (self : Tuple ): """simple docstring""" super().setUp() __snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : str , **a__ : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : str , a__ : Any ): """simple docstring""" __snake_case = '''This is a là test''' __snake_case = '''This is a<unk><unk> test''' return input_text, output_text def a (self : Dict ): """simple docstring""" __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case = '''This is a là test''' __snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split() __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
24
1
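Outside the test harness above, the tokenizer is normally loaded from a published checkpoint. A hedged sketch, assuming network access to download "vinai/bartpho-syllable" (the checkpoint named in the BARTpho release):

# Requires network access; the checkpoint name is the published BARTpho model.
from transformers import AutoTokenizer

bartpho = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
print(bartpho.tokenize("Chúng tôi là những nghiên cứu viên."))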
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller numeral before a larger one (e.g. "IV") is subtractive.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, hence the loop won't run 32
        # times; it only runs once per `1` bit.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
1
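A few quick checks for the Roman-numeral converters and the set-bit counter above, worked out by hand.

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert int_to_roman(roman_to_int("MCMXCIV")) == "MCMXCIV"  # 1994 round-trips
assert get_set_bits_count(25) == 3  # 25 == 0b11001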
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser snake_case_ = logging.getLogger(__name__) torch.set_grad_enabled(False) snake_case_ = 'cuda' if torch.cuda.is_available() else 'cpu' def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Tuple=100 , snake_case_ : int=" " ) -> List[str]: __snake_case = text.split(snake_case_ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case_ ) , snake_case_ )] def lowerCamelCase__ ( snake_case_ : dict ) -> dict: __snake_case , __snake_case = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(snake_case_ ): titles.append(title if title is not None else '''''' ) texts.append(snake_case_ ) return {"title": titles, "text": texts} def lowerCamelCase__ ( snake_case_ : dict , snake_case_ : DPRContextEncoder , snake_case_ : DPRContextEncoderTokenizerFast ) -> dict: __snake_case = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=snake_case_ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] __snake_case = ctx_encoder(input_ids.to(device=snake_case_ ) , return_dict=snake_case_ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase__ ( snake_case_ : "RagExampleArguments" , snake_case_ : "ProcessingArguments" , snake_case_ : "IndexHnswArguments" , ) -> Tuple: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __snake_case = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __snake_case = dataset.map(snake_case_ , batched=snake_case_ , num_proc=processing_args.num_proc ) # And compute the embeddings __snake_case = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case_ ) __snake_case = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __snake_case = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space __snake_case = dataset.map( partial(snake_case_ , ctx_encoder=snake_case_ , ctx_tokenizer=snake_case_ ) , batched=snake_case_ , batch_size=processing_args.batch_size , features=snake_case_ , ) # And finally save your dataset __snake_case = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) 
dataset.save_to_disk(snake_case_ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __snake_case = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=snake_case_ ) # And save the index __snake_case = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(snake_case_ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( default=str(Path(_UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) A_ : str = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) A_ : str = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) A_ : Optional[str] = field( default=str(Path(_UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : Optional[int] = field( default=_UpperCAmelCase , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) A_ : int = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : int = field( default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) A_ : int = field( default=128 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) snake_case_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: snake_case_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
24
from math import log2


def lowest_set_bit_index(a: int) -> int:
    # log2(a & -a) gives the index of the rightmost set bit, since a & -a
    # isolates that bit as a power of two.
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
1
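A worked example for the rightmost-set-bit helper above: for 36 == 0b100100, the expression 36 & -36 isolates 0b100, and log2 of that gives index 2.

assert 36 & -36 == 4
assert lowest_set_bit_index(36) == 2
assert lowest_set_bit_index(1) == 0  # bit 0 is the lowest set bit of 1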
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase__ ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ) -> int: __snake_case = args.log_outputs __snake_case = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric __snake_case = load_metric('''wer''' ) __snake_case = load_metric('''cer''' ) # compute metrics __snake_case = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) __snake_case = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results __snake_case = f"""WER: {wer_result}\nCER: {cer_result}""" print(snake_case_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case = f"""log_{dataset_id}_predictions.txt""" __snake_case = f"""log_{dataset_id}_targets.txt""" with open(snake_case_ , '''w''' ) as p, open(snake_case_ , '''w''' ) as t: # mapping function to write output def write_to_file(snake_case_ : int , snake_case_ : Optional[Any] ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(snake_case_ , with_indices=snake_case_ ) def lowerCamelCase__ ( snake_case_ : str ) -> str: __snake_case = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case = re.sub(snake_case_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __snake_case = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: __snake_case = ''' '''.join(text.split(snake_case_ ) ) return text def lowerCamelCase__ ( snake_case_ : Any ) -> str: # load dataset __snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case = feature_extractor.sampling_rate # resample audio __snake_case = dataset.cast_column('''audio''' , Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: __snake_case = 0 if torch.cuda.is_available() else -1 __snake_case = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case_ : str ): __snake_case = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case = prediction['''text'''] __snake_case = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples __snake_case = dataset.map(snake_case_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ , snake_case_ ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) snake_case_ = parser.parse_args() main(args)
24
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # Remember the original (width, height) so the depth map can be
        # resized back in postprocess.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # interpolate expects (height, width); PIL's .size is (width, height).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
24
1
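A hedged usage sketch for the depth-estimation pipeline above; it assumes network access to fetch the "Intel/dpt-large" weights and a reachable example image, so treat it as a sketch rather than a self-contained test.

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")       # PIL image scaled to 0-255, as built in postprocess
print(outputs["predicted_depth"].shape)  # raw depth tensor from the model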
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = BartphoTokenizer A_ : List[str] = False A_ : Optional[Any] = True def a (self : Tuple ): """simple docstring""" super().setUp() __snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : str , **a__ : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : str , a__ : Any ): """simple docstring""" __snake_case = '''This is a là test''' __snake_case = '''This is a<unk><unk> test''' return input_text, output_text def a (self : Dict ): """simple docstring""" __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case = '''This is a là test''' __snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split() __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
24
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> Any: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __snake_case = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> Any: assert _test_patching.open is open __snake_case = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , snake_case_ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> List[str]: # pandas.read_csv is not present in _test_patching __snake_case = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ): pass def lowerCamelCase__ ( ) -> Union[str, Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __snake_case = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , snake_case_ ) is None with patch_submodule(_test_patching , '''len''' , snake_case_ ): assert _test_patching.len is mock assert _test_patching.len is len def 
lowerCamelCase__ ( ) -> Union[str, Any]: __snake_case = '''__test_patch_submodule_start_and_stop_mock__''' __snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __snake_case = '''__test_patch_submodule_successive_join__''' __snake_case = '''__test_patch_submodule_successive_dirname__''' __snake_case = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> Tuple: __snake_case = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ): pass
24
1
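A minimal sketch of the behavior the patching tests above assert, with a hypothetical module `my_module` that did `import os` at import time.

from datasets.utils.patching import patch_submodule

import my_module  # hypothetical module that imported `os` at the top level


def fake_join(*parts):
    return "<patched>"


with patch_submodule(my_module, "os.path.join", fake_join):
    assert my_module.os.path.join is fake_join  # patched inside the context
assert my_module.os.path.join is not fake_join  # original restored on exit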
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[str] = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'CLIPImageProcessor' A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ): """simple docstring""" __snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) __snake_case = kwargs.pop('''feature_extractor''' ) __snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: __snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: __snake_case = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*a__ , **a__ ) def a (self : Any , *a__ : List[Any] , **a__ : List[str] ): """simple docstring""" return self.tokenizer.decode(*a__ , **a__ ) @property def a (self : int ): """simple docstring""" __snake_case = self.tokenizer.model_input_names __snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) A_ : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) A_ : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) ) def lowerCamelCase__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): __snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case = SeqaSeqDataset # Get datasets __snake_case = ( dataset_class( snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) __snake_case = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) __snake_case = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __snake_case = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case = train_result.metrics __snake_case = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate(metric_key_prefix='''val''' ) __snake_case = data_args.n_val __snake_case = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' ) __snake_case = test_output.metrics __snake_case = data_args.n_test if trainer.is_world_process_zero(): __snake_case = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: __snake_case = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) __snake_case = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
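# A minimal, self-contained sketch of the HfArgumentParser pattern the script
# above relies on. `ExampleArguments` is a hypothetical dataclass added only
# for illustration; just the `transformers` library itself is assumed.
import sys
from dataclasses import dataclass, field

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ExampleArguments:
    data_dir: str = field(metadata={"help": "Directory containing the .tsv data files."})
    n_train: int = field(default=-1, metadata={"help": "# training examples. -1 means use all."})


def parse_example_args():
    parser = HfArgumentParser((ExampleArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # A single JSON file path on the command line is treated as an argument file.
        return parser.parse_json_file(json_file=sys.argv[1])
    return parser.parse_args_into_dataclasses()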
24
1
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str , snake_case_ : str ) -> Dict: def get_masked_lm_array(snake_case_ : str ): __snake_case = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case = tf.train.load_variable(snake_case_ , snake_case_ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case_ ) def get_encoder_array(snake_case_ : str ): __snake_case = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case = tf.train.load_variable(snake_case_ , snake_case_ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case_ ) def get_encoder_layer_array(snake_case_ : int , snake_case_ : str ): __snake_case = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case = tf.train.load_variable(snake_case_ , snake_case_ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case_ ) def get_encoder_attention_layer_array(snake_case_ : int , snake_case_ : str , snake_case_ : Any ): __snake_case = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case = tf.train.load_variable(snake_case_ , snake_case_ ) __snake_case = array.reshape(snake_case_ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case_ ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case = BertConfig.from_json_file(snake_case_ ) __snake_case = BertForMaskedLM(snake_case_ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case = model.bert.encoder.layer[layer_index] # Self-attention __snake_case = layer.attention.self __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case = layer.attention.output __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case = get_encoder_layer_array(snake_case_ , '''_attention_layer_norm/gamma''' ) __snake_case = get_encoder_layer_array(snake_case_ , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case = layer.intermediate __snake_case = get_encoder_layer_array(snake_case_ , '''_intermediate_dense/kernel''' ) __snake_case = get_encoder_layer_array(snake_case_ , '''_intermediate_dense/bias''' ) # Output __snake_case = layer.output __snake_case = 
get_encoder_layer_array(snake_case_ , '''_output_dense/kernel''' ) __snake_case = get_encoder_layer_array(snake_case_ , '''_output_dense/bias''' ) __snake_case = get_encoder_layer_array(snake_case_ , '''_output_layer_norm/gamma''' ) __snake_case = get_encoder_layer_array(snake_case_ , '''_output_layer_norm/beta''' ) # Embeddings __snake_case = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case = model.cls.predictions.transform __snake_case = get_masked_lm_array('''dense/kernel''' ) __snake_case = get_masked_lm_array('''dense/bias''' ) __snake_case = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case = get_masked_lm_array('''layer_norm/beta''' ) __snake_case = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case = BertPooler(config=snake_case_ ) __snake_case = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(snake_case_ ) # Integration test - should load without any errors ;) __snake_case = BertForMaskedLM.from_pretrained(snake_case_ ) print(new_model.eval() ) print('''Model conversion was done successfully!''' ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model.', ) snake_case_ = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
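# Why every "kernel" array above is transposed: TensorFlow stores a dense
# kernel as (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features). A standalone sketch of that one conversion step:
import numpy as np
import torch

tf_kernel = np.random.randn(768, 3072).astype(np.float32)  # TF layout: (in, out)
linear = torch.nn.Linear(768, 3072)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))  # now (out, in)
assert tuple(linear.weight.shape) == (3072, 768)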
24
from math import pi


def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
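# The same helper with readable names (mine, not the source's): a 90-degree
# arc of a radius-10 circle is a quarter of the full circumference 2*pi*r.
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


assert abs(arc_length(90, 10) - 5 * pi) < 1e-12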
24
1
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowerCamelCase__ ( ) -> None: print('''Making key files...''' ) make_key_files('''rsa''' , 1024 ) print('''Key files generation successful.''' ) def lowerCamelCase__ ( snake_case_ : int ) -> tuple[tuple[int, int], tuple[int, int]]: print('''Generating prime p...''' ) __snake_case = rabinMiller.generate_large_prime(snake_case_ ) print('''Generating prime q...''' ) __snake_case = rabinMiller.generate_large_prime(snake_case_ ) __snake_case = p * q print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' ) while True: __snake_case = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(snake_case_ , (p - 1) * (q - 1) ) == 1: break print('''Calculating d that is mod inverse of e...''' ) __snake_case = cryptoMath.find_mod_inverse(snake_case_ , (p - 1) * (q - 1) ) __snake_case = (n, e) __snake_case = (n, d) return (public_key, private_key) def lowerCamelCase__ ( snake_case_ : str , snake_case_ : int ) -> None: if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('''\nWARNING:''' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() __snake_case , __snake_case = generate_key(snake_case_ ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
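# The number theory behind generate_key(), shown with deliberately tiny toy
# primes (never use sizes like these for real keys). Python 3.8+ exposes the
# modular inverse directly through the three-argument pow():
p, q, e = 61, 53, 17
n, phi = p * q, (p - 1) * (q - 1)
d = pow(e, -1, phi)                    # private exponent: e*d == 1 (mod phi)
assert (e * d) % phi == 1
message = 42
cipher = pow(message, e, n)            # encrypt with the public key (n, e)
assert pow(cipher, d, n) == message    # decrypt with the private key (n, d)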
24
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case_ = logging.get_logger(__name__)

snake_case_ = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    A_ : List[Any] = 'vit_msn'

    def __init__(self : Union[str, Any] , a__ : Optional[Any]=768 , a__ : Optional[Any]=12 , a__ : Optional[int]=12 , a__ : Optional[int]=3072 , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : int=0.0 , a__ : Optional[Any]=0.0_2 , a__ : List[Any]=1E-06 , a__ : Optional[int]=224 , a__ : str=16 , a__ : Optional[Any]=3 , a__ : int=True , **a__ : List[Any] , ):
        """simple docstring"""
        super().__init__(**a__ )
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = initializer_range
        __snake_case = layer_norm_eps
        __snake_case = image_size
        __snake_case = patch_size
        __snake_case = num_channels
        __snake_case = qkv_bias
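# How a config class like this is used in practice. `ViTMSNConfig` is the name
# current `transformers` releases export for this model, but treat the snippet
# as a sketch rather than a pinned API.
from transformers import ViTMSNConfig

config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
config.save_pretrained("./vit-msn")                  # writes config.json
reloaded = ViTMSNConfig.from_pretrained("./vit-msn")
assert reloaded.hidden_size == 768 and reloaded.model_type == "vit_msn"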
24
1
from ..utils import DummyObject, requires_backends


class SCREAMING_SNAKE_CASE__ ( metaclass=_UpperCAmelCase ):
    A_ : Optional[Any] = ['onnx']

    def __init__(self : Union[str, Any] , *a__ : List[str] , **a__ : Optional[Any] ):
        """simple docstring"""
        requires_backends(self , ['''onnx'''] )

    @classmethod
    def a (cls : str , *a__ : Any , **a__ : Tuple ):
        """simple docstring"""
        requires_backends(cls , ['''onnx'''] )

    @classmethod
    def a (cls : Dict , *a__ : Any , **a__ : Union[str, Any] ):
        """simple docstring"""
        requires_backends(cls , ['''onnx'''] )
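# The pattern these dummies implement, reduced to a standalone sketch (the
# mechanism differs slightly from transformers' DummyObject): a metaclass that
# raises only when the placeholder is actually used, so importing the library
# never fails just because the "onnx" backend is missing.
class RequiresOnnx(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the onnx backend: pip install onnx")


class OnnxPipelinePlaceholder(metaclass=RequiresOnnx):
    pass


try:
    OnnxPipelinePlaceholder()          # fails at use time, not at import time
except ImportError as err:
    print(err)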
24
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def a (self : int , a__ : List[Any]=0 ): """simple docstring""" __snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) ) __snake_case = np.random.RandomState(a__ ) __snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) # warmup pass to apply optimizations __snake_case = pipe(**self.get_dummy_inputs() ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Any ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = 
np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : List[str] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = ort.SessionOptions() __snake_case = False return options def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) # using the PNDM scheduler by default __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , 
revision='''onnx''' ) __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
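# The assertion idiom these tests repeat, isolated: compare a tiny corner
# slice of the output image against golden values within a loose tolerance,
# instead of matching the full image bit-for-bit across runtimes. The
# expected values here are stand-ins, not real golden numbers.
import numpy as np

image = np.random.RandomState(0).rand(1, 128, 128, 3)
image_slice = image[0, -3:, -3:, -1].flatten()   # 3x3 corner, last channel
expected_slice = image_slice + 1e-3              # stand-in for golden values
assert image.shape == (1, 128, 128, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1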
24
1
import qiskit


def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> qiskit.result.counts.Counts:
    __snake_case = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    __snake_case = qiskit.QuantumCircuit(snake_case_ , snake_case_ )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    __snake_case = qiskit.execute(snake_case_ , snake_case_ , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(snake_case_ )


if __name__ == "__main__":
    snake_case_ = single_qubit_measure(2, 2)
    print(F'Total count for various states are: {counts}')
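# Both qubits are deterministically flipped to |1>, so all 1000 shots should
# land in the "11" bin. A sketch assuming the same qiskit generation as the
# snippet above (qiskit.execute and qiskit.Aer were removed in qiskit 1.0):
import qiskit

backend = qiskit.Aer.get_backend("aer_simulator")
circuit = qiskit.QuantumCircuit(2, 2)
circuit.x(0)
circuit.x(1)
circuit.measure([0, 1], [0, 1])
counts = qiskit.execute(circuit, backend, shots=1000).result().get_counts(circuit)
assert counts == {"11": 1000}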
24
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
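# The tokenizer call at the core of hans_convert_examples_to_features, shown
# standalone (the checkpoint name is only an example):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoded = tokenizer(
    "The doctor paid the actor.",      # premise (text_a)
    "The actor paid the doctor.",      # hypothesis (text_b)
    max_length=128,
    padding="max_length",
    truncation=True,
)
assert len(encoded["input_ids"]) == 128   # padded/truncated to max_length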
24
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case_ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
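# What the _LazyModule indirection above buys: heavy submodules are imported
# only on first attribute access, keeping the package import itself cheap. A
# minimal standalone sketch of the same idea (not transformers' actual class):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the module that actually defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name: str):
        module = importlib.import_module(self._symbol_to_module[name])
        return getattr(module, name)


lazy = LazyModule("demo", {"collections": ["OrderedDict"]})
assert lazy.OrderedDict is importlib.import_module("collections").OrderedDict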
24
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[str] = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'CLIPImageProcessor' A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ): """simple docstring""" __snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) __snake_case = kwargs.pop('''feature_extractor''' ) __snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: __snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: __snake_case = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*a__ , **a__ ) def a (self : Any , *a__ : List[Any] , **a__ : List[str] ): """simple docstring""" return self.tokenizer.decode(*a__ , **a__ ) @property def a (self : int ): """simple docstring""" __snake_case = self.tokenizer.model_input_names __snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
1
import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml snake_case_ = logging.get_logger(__name__) def lowerCamelCase__ ( snake_case_ : bool , snake_case_ : bool ) -> Optional[Any]: def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : str , **snake_case_ : Any ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : List[str] , **snake_case_ : Any ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: __snake_case = random.Random() __snake_case = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : TensorFlowBenchmarkArguments A_ : PretrainedConfig A_ : str = "TensorFlow" @property def a (self : str ): """simple docstring""" return tf.__version__ def a (self : Optional[int] , a__ : str , a__ : int , a__ : int ): """simple docstring""" __snake_case = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case = self._prepare_inference_func(a__ , a__ , a__ ) return self._measure_speed(_inference ) def a (self : Dict , a__ : str , a__ : int , a__ : int ): """simple docstring""" __snake_case = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case = self._prepare_train_func(a__ , a__ , a__ ) return self._measure_speed(_train ) def a (self : List[str] , a__ : str , a__ : int , a__ : int ): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a__ ) __snake_case = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case = self._prepare_inference_func(a__ , a__ , a__ ) return self._measure_memory(_inference ) def a (self : Tuple , a__ : str , a__ : int , a__ : int ): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a__ ) __snake_case = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) __snake_case = self._prepare_train_func(a__ , a__ , a__ ) return self._measure_memory(_train ) def a (self : Union[str, Any] , a__ : str , a__ : int , a__ : int ): """simple 
docstring""" __snake_case = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) __snake_case = ( hasattr(a__ , '''architectures''' ) and isinstance(config.architectures , a__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __snake_case = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model __snake_case = __import__('''transformers''' , fromlist=[model_class] ) __snake_case = getattr(a__ , a__ ) __snake_case = model_cls(a__ ) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: __snake_case = TF_MODEL_MAPPING[config.__class__](a__ ) # encoder-decoder has vocab size saved differently __snake_case = config.vocab_size if hasattr(a__ , '''vocab_size''' ) else config.encoder.vocab_size __snake_case = random_input_ids(a__ , a__ , a__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(a__ , decoder_input_ids=a__ , training=a__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(a__ , training=a__ ) __snake_case = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def a (self : Union[str, Any] , a__ : str , a__ : int , a__ : int ): """simple docstring""" __snake_case = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) __snake_case = ( hasattr(a__ , '''architectures''' ) and isinstance(config.architectures , a__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __snake_case = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model __snake_case = __import__('''transformers''' , fromlist=[model_class] ) __snake_case = getattr(a__ , a__ ) __snake_case = model_cls(a__ ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: __snake_case = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a__ ) # encoder-decoder has vocab size saved differently __snake_case = config.vocab_size if hasattr(a__ , '''vocab_size''' ) else config.encoder.vocab_size __snake_case = random_input_ids(a__ , a__ , a__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): __snake_case = model(a__ , decoder_input_ids=a__ , labels=a__ , training=a__ )[0] __snake_case = tf.gradients(a__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): __snake_case = model(a__ , labels=a__ , training=a__ )[0] __snake_case = tf.gradients(a__ , model.trainable_variables ) return gradients __snake_case = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def a (self : List[Any] , a__ : Dict ): """simple docstring""" with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(a__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average __snake_case = timeit.repeat( a__ , repeat=self.args.repeat , number=10 , ) return min(a__ ) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) def a (self : Dict , a__ : Callable[[], None] ): """simple docstring""" logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) __snake_case = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) __snake_case = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() __snake_case = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) __snake_case = nvml.nvmlDeviceGetMemoryInfo(a__ ) __snake_case = meminfo.used __snake_case = Memory(a__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) __snake_case = None else: __snake_case = measure_peak_memory_cpu(a__ ) __snake_case = Memory(a__ ) if isinstance(a__ , a__ ) else memory_bytes if self.args.trace_memory_line_by_line: __snake_case = stop_memory_tracing(a__ ) if memory is None: __snake_case = summary.total else: __snake_case = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
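# The speed-measurement idiom _measure_speed uses, isolated: repeat a timed
# block several times, take the *minimum* (least-interfered) run, and divide
# by the inner iteration count to get seconds per call.
import timeit


def work():
    sum(i * i for i in range(10_000))


runtimes = timeit.repeat(work, repeat=3, number=10)
print(f"{min(runtimes) / 10.0 * 1e3:.3f} ms per call")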
24
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Dict ): """simple docstring""" __snake_case = logging.get_logger() # the current default level is logging.WARNING __snake_case = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a__ ) def a (self : Dict ): """simple docstring""" __snake_case = logging.get_verbosity() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(a__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def a (self : Dict ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() # this action activates the env var __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ ) __snake_case = logging.log_levels[env_level_str] __snake_case = logging.get_verbosity() self.assertEqual( a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __snake_case = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def a (self : List[Any] ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.logging.getLogger() with CaptureLogger(a__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def a (self : Any ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log 
normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) def lowerCamelCase__ ( ) -> str: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
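# The public verbosity knobs these tests exercise, in everyday library use:
from transformers.utils import logging

logging.set_verbosity_error()                 # silence library warnings
assert logging.get_verbosity() == logging.ERROR
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("this is suppressed at ERROR verbosity")
logging.set_verbosity_warning()               # restore the default level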
24
1
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent snake_case_ = {'UserAgent': UserAgent().random} def lowerCamelCase__ ( snake_case_ : Any ) -> dict: __snake_case = script.contents[0] __snake_case = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__(self : Optional[Any] , a__ : Tuple ): """simple docstring""" __snake_case = f"""https://www.instagram.com/{username}/""" __snake_case = self.get_json() def a (self : Tuple ): """simple docstring""" __snake_case = requests.get(self.url , headers=a__ ).text __snake_case = BeautifulSoup(a__ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self : Optional[int] ): """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__(self : int ): """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def a (self : Optional[Any] ): """simple docstring""" return self.user_data["username"] @property def a (self : List[Any] ): """simple docstring""" return self.user_data["full_name"] @property def a (self : str ): """simple docstring""" return self.user_data["biography"] @property def a (self : Tuple ): """simple docstring""" return self.user_data["business_email"] @property def a (self : str ): """simple docstring""" return self.user_data["external_url"] @property def a (self : str ): """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def a (self : str ): """simple docstring""" return self.user_data["edge_follow"]["count"] @property def a (self : Dict ): """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def a (self : List[str] ): """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def a (self : Dict ): """simple docstring""" return self.user_data["is_verified"] @property def a (self : int ): """simple docstring""" return self.user_data["is_private"] def lowerCamelCase__ ( snake_case_ : str = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __snake_case = InstagramUser(snake_case_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , snake_case_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 12_0000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() snake_case_ = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
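# The scraping trick above, reduced to its essence: the profile data sits in a
# <script> tag as a JSON blob terminated by ";", so it can be sliced out and
# parsed with the standard library alone. The payload here is a toy stand-in.
import json

script_text = 'window._sharedData = {"config": {}, "user": {"username": "github"}};'
payload = json.loads(script_text[script_text.find('{"config"') : -1])
assert payload["user"]["username"] == "github"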
24
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[str] = CpmAntTokenizer A_ : Optional[int] = False def a (self : Optional[int] ): """simple docstring""" super().setUp() __snake_case = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def a (self : Dict ): """simple docstring""" __snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) __snake_case = '''今天天气真好!''' __snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = '''今天天气真好!''' __snake_case = [tokenizer.bos_token] + tokens __snake_case = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) __snake_case = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ )
24
1
def lowerCamelCase__ ( snake_case_ : list , snake_case_ : int = 0 ) -> list:
    __snake_case = length or len(snake_case_ )
    __snake_case = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            __snake_case , __snake_case = list_data[i + 1], list_data[i]
            __snake_case = True
    return list_data if not swapped else bubble_sort(snake_case_ , length - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
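# The same recursive bubble sort with readable names (mine): each pass bubbles
# the largest remaining element into place, and the recursion stops as soon as
# a full pass makes no swaps.
def bubble_sort(data: list, length: int = 0) -> list:
    length = length or len(data)
    swapped = False
    for i in range(length - 1):
        if data[i] > data[i + 1]:
            data[i], data[i + 1] = data[i + 1], data[i]
            swapped = True
    return data if not swapped else bubble_sort(data, length - 1)


assert bubble_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert bubble_sort([]) == []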
24
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class SCREAMING_SNAKE_CASE__ : def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ): """simple docstring""" __snake_case = parent __snake_case = out_indices if out_indices is not None else [4] __snake_case = stage_names __snake_case = out_features __snake_case = backbone __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = use_pretrained_backbone __snake_case = is_training def a (self : Union[str, Any] ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = self.get_config() return config, pixel_values def a (self : Any ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def a (self : List[Any] , a__ : int , a__ : int ): """simple docstring""" __snake_case = TimmBackbone(config=a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(a__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def a (self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else () A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : List[Any] = False A_ : Dict = False A_ : Any = False A_ : List[Any] = False def a (self : Tuple ): """simple docstring""" __snake_case = TimmBackboneModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ ) def a (self : Any ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : int ): """simple docstring""" __snake_case = '''resnet18''' __snake_case = '''microsoft/resnet-18''' __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ ) __snake_case = AutoBackbone.from_pretrained(a__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) 
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] ) __snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def a (self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : Optional[int] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a (self : Tuple ): """simple docstring""" pass def a (self : Tuple ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True __snake_case = 
self.has_attentions # no need to test all models as different heads yield the same functionality __snake_case = self.all_model_classes[0] __snake_case = model_class(a__ ) model.to(a__ ) __snake_case = self._prepare_for_class(a__ , a__ ) __snake_case = model(**a__ ) __snake_case = outputs[0][-1] # Encoder-/Decoder-only models __snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=a__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def a (self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __snake_case = copy.deepcopy(a__ ) __snake_case = None __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __snake_case = copy.deepcopy(a__ ) __snake_case = False __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ )
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> List[Any]: __snake_case = checkpoints.load_tax_checkpoint(snake_case_ ) __snake_case = flatten_dict(snake_case_ ) return flax_params def lowerCamelCase__ ( snake_case_ : Dict ) -> Optional[int]: __snake_case = {} __snake_case = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } __snake_case = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __snake_case = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __snake_case = new_key.replace(snake_case_ , snake_case_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __snake_case = new_key.replace(snake_case_ , snake_case_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , snake_case_ ) __snake_case = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , snake_case_ ) __snake_case = flax_dict[key] __snake_case = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __snake_case = torch.from_numpy(converted_dict[key].T ) else: __snake_case = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Union[str, Any]=False , snake_case_ : Optional[int]=False ) -> Union[str, Any]: __snake_case = get_flax_param(snake_case_ ) if not use_large: __snake_case = PixaStructVisionConfig() __snake_case = PixaStructTextConfig() else: __snake_case = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __snake_case = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __snake_case = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=snake_case_ ) __snake_case = 
PixaStructForConditionalGeneration(snake_case_ ) __snake_case = rename_and_convert_flax_params(snake_case_ ) model.load_state_dict(snake_case_ ) __snake_case = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) __snake_case = PixaStructImageProcessor() __snake_case = PixaStructProcessor(image_processor=snake_case_ , tokenizer=snake_case_ ) if use_large: __snake_case = 4096 __snake_case = True # mkdir if needed os.makedirs(snake_case_ , exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) print('''Model saved in {}'''.format(snake_case_ ) ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') snake_case_ = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Any ) -> Optional[int]: __snake_case = AutoConfig.from_pretrained(snake_case_ ) __snake_case = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case_ ) __snake_case = checkpoints.load_tax_checkpoint(snake_case_ ) __snake_case = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": __snake_case = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": __snake_case = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): __snake_case = f"""layers_{str(snake_case_ )}""" # Self-Attention __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __snake_case = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __snake_case = flax_model.params['''encoder''']['''block'''][str(snake_case_ )]['''layer'''] __snake_case = tax_attention_key __snake_case = tax_attention_out __snake_case = tax_attention_query __snake_case = tax_attention_value __snake_case = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case = tax_global_layer_norm if split_mlp_wi: __snake_case = tax_mlp_wi_a __snake_case = tax_mlp_wi_a else: __snake_case = tax_mlp_wi __snake_case = tax_mlp_wo __snake_case = tax_mlp_layer_norm __snake_case = flax_model_encoder_layer_block # Only for layer 0: __snake_case = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T __snake_case = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case = 
tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T __snake_case = tax_encoder_global_rel_embedding # Assigning __snake_case = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] __snake_case = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): __snake_case = f"""layers_{str(snake_case_ )}""" # Self-Attention __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] __snake_case = tax_enc_dec_attention_module['''key''']['''kernel'''] __snake_case = tax_enc_dec_attention_module['''out''']['''kernel'''] __snake_case = tax_enc_dec_attention_module['''query''']['''kernel'''] __snake_case = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __snake_case = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __snake_case = flax_model.params['''decoder''']['''block'''][str(snake_case_ )]['''layer'''] __snake_case = tax_attention_key __snake_case = tax_attention_out __snake_case = tax_attention_query __snake_case = tax_attention_value __snake_case = tax_pre_attention_layer_norm __snake_case = tax_enc_dec_attention_key __snake_case = tax_enc_dec_attention_out __snake_case = tax_enc_dec_attention_query __snake_case = tax_enc_dec_attention_value __snake_case = tax_cross_layer_norm if split_mlp_wi: __snake_case = tax_mlp_wi_a __snake_case = tax_mlp_wi_a else: __snake_case = tax_mlp_wi __snake_case = tax_mlp_wo __snake_case = txa_mlp_layer_norm __snake_case = flax_model_decoder_layer_block # Decoder Normalization __snake_case = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] __snake_case = txa_decoder_norm # Only for layer 0: __snake_case = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T __snake_case = tax_decoder_rel_embedding # Token Embeddings __snake_case = tax_model['''target''']['''token_embedder''']['''embedding'''] __snake_case = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: __snake_case = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(snake_case_ ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": snake_case_ = 
argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) snake_case_ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it
    # (it's the stop condition of recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
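A quick usage sketch for the restored longest_subsequence; the expected outputs follow the upstream doctests for this recursive longest non-decreasing subsequence algorithm, so treat them as illustrative rather than authoritative:

# Usage sketch (assumes the longest_subsequence definition above)
print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]
print(longest_subsequence([1, 2, 3, 9]))  # [1, 2, 3, 9] (already non-decreasing)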
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = scope __snake_case = range_bbox def a (self : Optional[int] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case = bbox[i, j, 3] __snake_case = bbox[i, j, 1] __snake_case = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case = bbox[i, j, 2] __snake_case = bbox[i, j, 0] __snake_case = t __snake_case = None if self.use_input_mask: __snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def a (self : List[str] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a (self : List[Any] , a__ : 
List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ): """simple docstring""" __snake_case = LiltModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ): """simple docstring""" __snake_case = self.num_labels __snake_case = LiltForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ): """simple docstring""" __snake_case = LiltForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) A_ : Any = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) A_ : Optional[int] = False A_ : List[Any] = False def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" return True def a (self : Union[str, Any] ): """simple docstring""" __snake_case = LiltModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) @slow def a (self : Optional[int] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = LiltModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Tuple ): """simple docstring""" __snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ ) __snake_case = torch.tensor([[1, 2]] , device=a__ ) __snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ ) # forward pass with torch.no_grad(): __snake_case = model(input_ids=a__ , bbox=a__ ) __snake_case = torch.Size([1, 2, 768] ) __snake_case = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , ) self.assertTrue(outputs.last_hidden_state.shape , a__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
from math import log2


def index_of_rightmost_set_bit(number: int) -> int:
    """Return the 0-based index of the rightmost set bit of a non-negative integer."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
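A worked example for the bit trick above: number & -number isolates the lowest set bit (a power of two), and log2 of that power gives its index. A minimal check, assuming the restored name index_of_rightmost_set_bit:

# 36 = 0b100100, so 36 & -36 == 0b100 == 4 and the rightmost set bit is at index 2
print(index_of_rightmost_set_bit(36))  # 2
print(index_of_rightmost_set_bit(8))   # 3 (8 = 0b1000)
print(index_of_rightmost_set_bit(0))   # 0 (by convention in this implementation)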
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def a (*a__ : List[str] , **a__ : List[str] ): """simple docstring""" pass def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. snake_case_ = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ ) __snake_case = INVOICE_URL __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) __snake_case = '''What is the placebo?''' __snake_case = [ { '''image''': load_image(a__ ), '''question''': question, }, { '''image''': image, '''question''': question, }, { '''image''': image, '''question''': question, '''word_boxes''': word_boxes, }, ] return dqa_pipeline, examples def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ): """simple docstring""" __snake_case = dqa_pipeline(a__ , top_k=2 ) self.assertEqual( a__ , [ [ {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def a (self : Dict ): """simple docstring""" __snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' ) __snake_case = INVOICE_URL __snake_case = '''How many cats are there?''' __snake_case = [ {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39}, {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40}, ] __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(a__ , [] ) # We can optionnally pass directly the words and bounding boxes __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = [] __snake_case = [] __snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 ) self.assertEqual(a__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : str ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : List[Any] ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Tuple ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , 
revision='''3dc6de3''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Dict ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) @slow @require_torch def a (self : Tuple ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , 
feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] ) @require_tf @unittest.skip('''Document question answering not implemented in TF''' ) def a (self : List[str] ): """simple docstring""" pass
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that overshoot max_sum or can no longer reach it
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers that cannot be written
    as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
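A small sanity check for the divisor-sum sieve inside solution(): sum_divs[n] should equal the sum of the proper divisors of n. A hedged sketch against a brute-force reference (proper_divisor_sum is introduced here for illustration only):

def proper_divisor_sum(n: int) -> int:
    # Brute-force reference for the sieve above
    return sum(d for d in range(1, n) if n % d == 0)

# 12 is the smallest abundant number: 1 + 2 + 3 + 4 + 6 = 16 > 12
assert proper_divisor_sum(12) == 16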
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_labels __snake_case = initializer_range __snake_case = out_features __snake_case = out_indices __snake_case = scope def a (self : Dict ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ): """simple docstring""" __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case = None __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A_ : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A_ : Dict = True A_ : Optional[Any] = False A_ : int = False A_ : int = False A_ : List[str] = False def a (self : List[str] ): """simple docstring""" __snake_case = ConvNextModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Tuple ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : str ): """simple docstring""" return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Optional[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def a (self : Dict ): """simple docstring""" def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a (self : Any ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = ConvNextModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> List[str]: __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def a (self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def a (self : Optional[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __snake_case = model(**a__ ) # verify the logits __snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ): A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A_ : List[Any] = ConvNextConfig A_ : Optional[Any] = False def a (self : Optional[int] ): """simple docstring""" __snake_case = ConvNextModelTester(self )
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        ("1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
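Two traced calls for the routine above; the output bit width is len(bin(number)[3:]) + 1, as in the code:

print(twos_complement(-5))   # 0b1011   (-5 in 4 bits)
print(twos_complement(-17))  # 0b101111 (-17 in 6 bits)
print(twos_complement(0))    # 0b0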
def solution() -> int:
    """Project Euler 9: product of the Pythagorean triplet (a, b, c) with
    a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
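For reference, the triplet solution() finds is (200, 375, 425): 200 + 375 + 425 = 1000 and 200^2 + 375^2 = 180625 = 425^2, so the returned product is 31875000. A one-line check, assuming solution() as restored above:

assert 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2
print(200 * 375 * 425)  # 31875000, the expected value of solution()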
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate snake_case_ = trt.Logger(trt.Logger.WARNING) snake_case_ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) snake_case_ = logging.getLogger(__name__) snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()

# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
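# Illustrative recap (added for clarity; not part of the original script) of the
# asynchronous copy/compute/copy pattern that model_infer() relies on. Pinned
# ("page-locked") host buffers are what let memcpy_htod_async/memcpy_dtoh_async
# actually run asynchronously; all work is ordered by the single CUDA stream, so
# the host buffers only hold valid results after stream.synchronize():
#
#   h_in = cuda.pagelocked_empty(shape, dtype=np.int32)   # pinned host input
#   d_in = cuda.mem_alloc(h_in.nbytes)                    # device buffer
#   cuda.memcpy_htod_async(d_in, h_in, stream)            # host -> device
#   ...                                                   # enqueue inference on the same stream
#   cuda.memcpy_dtoh_async(h_out, d_out, stream)          # device -> host
#   stream.synchronize()                                  # h_out is valid from here on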
def lowerCamelCase__ ( number : int ) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (the next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32 times,
        # it only runs once per set bit.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
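# Hypothetical demo (not part of the original module) tracing the Kernighan
# step used above: each `n &= n - 1` clears exactly the lowest set bit, so the
# loop body runs once per 1-bit.
def _demo_kernighan(n: int = 0b1011) -> None:
    while n:
        print(bin(n))  # prints 0b1011, 0b1010, 0b1000 -- three lines, three set bits
        n &= n - 1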
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
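# Quick shape check (illustrative only, not from the original file) for the
# nearest-neighbour upsampling used in the first block: resizing to
# (batch, 2 * height, 2 * width, channels) doubles the spatial dimensions while
# leaving the batch and channel axes untouched.
#
#   x = jnp.zeros((1, 8, 8, 3))
#   y = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
#   assert y.shape == (1, 16, 16, 3)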
from math import log2


def lowerCamelCase__ ( a : int ) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
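# Illustration (added, not part of the original module): `a & -a` isolates the
# lowest set bit because two's-complement negation flips every bit above it,
# e.g. 12 = 0b1100 -> 12 & -12 = 0b100 = 4, and log2(4) = 2 is that bit's index.
#
#   assert 12 & -12 == 4
#   assert int(log2(12 & -12)) == 2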
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")

input_string = "aab"
pattern = "c*a*b"

# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
    print(f"{input_string} matches the given pattern {pattern}")
else:
    print(f"{input_string} does not match with the given pattern {pattern}")
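# Worked example (illustrative): for input_string "aab" and pattern "c*a*b" the
# seeding loop marks dp[0][2] ("c*") and dp[0][4] ("c*a*") as 1, since a starred
# unit may match zero characters; the bottom-up pass then propagates matches and
# dp[-1][-1] ends up 1.
#
#   match_pattern("aab", "c*a*b")  # True  -- "c*" matches "", "a*" matches "aa"
#   match_pattern("aab", "c*a")    # False -- nothing consumes the trailing "b"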
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
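# Hypothetical usage sketch (the checkpoint id is an assumption, not taken from
# this file): the pipeline returns both the raw predicted_depth tensor computed
# in _forward() and the uint8 PIL visualisation built in postprocess().
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")
#   print(result["predicted_depth"].shape)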
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__(self : Optional[Any] , a__ : str , a__ : Any=7 , a__ : List[Any]=3 , a__ : List[Any]=18 , a__ : Optional[int]=30 , a__ : Dict=400 , a__ : str=True , a__ : int=None , a__ : List[str]=True , a__ : Union[str, Any]=None , a__ : List[str]=True , a__ : int=[0.5, 0.5, 0.5] , a__ : Optional[int]=[0.5, 0.5, 0.5] , a__ : Any=False , ): """simple docstring""" __snake_case = size if size is not None else {'''height''': 20, '''width''': 20} __snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case = parent __snake_case = batch_size __snake_case = num_channels __snake_case = image_size __snake_case = min_resolution __snake_case = max_resolution __snake_case = do_resize __snake_case = size __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_normalize __snake_case = image_mean __snake_case = image_std __snake_case = do_reduce_labels def a (self : List[str] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCamelCase__ ( ) -> Any: __snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __snake_case = Image.open(dataset[0]['''file'''] ) __snake_case = Image.open(dataset[1]['''file'''] ) return image, map def lowerCamelCase__ ( ) -> List[Any]: __snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __snake_case = Image.open(ds[0]['''file'''] ) __snake_case = Image.open(ds[1]['''file'''] ) __snake_case = Image.open(ds[2]['''file'''] ) __snake_case = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Optional[int] = BeitImageProcessor if is_vision_available() else None def a (self : Union[str, Any] ): """simple docstring""" __snake_case = BeitImageProcessingTester(self ) @property def a (self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a (self : List[Any] ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a__ , '''do_resize''' ) ) self.assertTrue(hasattr(a__ , '''size''' ) ) self.assertTrue(hasattr(a__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a__ , '''center_crop''' ) ) self.assertTrue(hasattr(a__ , '''do_normalize''' ) ) self.assertTrue(hasattr(a__ , '''image_mean''' ) ) self.assertTrue(hasattr(a__ , '''image_std''' ) ) def a (self : Tuple ): """simple docstring""" __snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , a__ ) 
__snake_case = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a__ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , a__ ) def a (self : Any ): """simple docstring""" pass def a (self : Any ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ ) for image in image_inputs: self.assertIsInstance(a__ , Image.Image ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a (self : List[str] ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , np.ndarray ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a (self : Any ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , torch.Tensor ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a (self : Dict ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) __snake_case = [] for image in 
image_inputs: self.assertIsInstance(a__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input __snake_case = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched __snake_case = image_processing(a__ , a__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test not batched input (PIL images) __snake_case , __snake_case = prepare_semantic_single_inputs() __snake_case = image_processing(a__ , a__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) # Test batched input (PIL images) __snake_case , __snake_case = prepare_semantic_batch_inputs() __snake_case = image_processing(a__ , a__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 ) def a (self : str ): """simple docstring""" __snake_case = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 __snake_case , __snake_case = prepare_semantic_single_inputs() __snake_case = image_processing(a__ , a__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 150 ) __snake_case = True __snake_case = image_processing(a__ , a__ , return_tensors='''pt''' ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 255 )
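# Hypothetical quick-start distilled from the assertions above (the sizes are
# assumptions, not taken from the tests): the processor resizes, centre-crops
# and normalises, so a single PIL image comes back as a float tensor of shape
# (1, num_channels, crop_height, crop_width).
#
#   from transformers import BeitImageProcessor
#   processor = BeitImageProcessor(size={"height": 20, "width": 20}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (1, 3, 18, 18)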
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> Any: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __snake_case = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> Any: assert _test_patching.open is open __snake_case = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , snake_case_ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> List[str]: # pandas.read_csv is not present in _test_patching __snake_case = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ): pass def lowerCamelCase__ ( ) -> Union[str, Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __snake_case = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , snake_case_ ) is None with patch_submodule(_test_patching , '''len''' , snake_case_ ): assert _test_patching.len is mock assert _test_patching.len is len def 
lowerCamelCase__ ( ) -> Union[str, Any]: __snake_case = '''__test_patch_submodule_start_and_stop_mock__''' __snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __snake_case = '''__test_patch_submodule_successive_join__''' __snake_case = '''__test_patch_submodule_successive_dirname__''' __snake_case = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> Tuple: __snake_case = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ): pass
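# Illustrative use of the start/stop API exercised above (it mirrors
# unittest.mock.patch): handy in setUp/tearDown pairs where a `with` block is
# awkward. The attribute path and mock value here are placeholders.
#
#   patcher = patch_submodule(_test_patching, "os.path.join", "<mock>")
#   patcher.start()   # _test_patching.os.path.join is now "<mock>"
#   patcher.stop()    # the original os.path.join is restored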
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
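# Sanity check (illustrative): the closed form above agrees with math.comb,
# available in the standard library since Python 3.8.
#
#   from math import comb
#   combinations(52, 5) == comb(52, 5) == 2_598_960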
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) A_ : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) A_ : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) ) def lowerCamelCase__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): __snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case = SeqaSeqDataset # Get datasets __snake_case = ( dataset_class( snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) __snake_case = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) __snake_case = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __snake_case = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case = train_result.metrics __snake_case = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate(metric_key_prefix='''val''' ) __snake_case = data_args.n_val __snake_case = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' ) __snake_case = test_output.metrics __snake_case = data_args.n_test if trainer.is_world_process_zero(): __snake_case = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: __snake_case = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) __snake_case = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
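# Hypothetical invocation (added for illustration; the script file name, paths
# and hyper-parameters are placeholders, not taken from this file):
#
#   python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./cnn_dm \
#     --output_dir ./output --do_train --do_eval \
#     --task summarization --predict_with_generate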
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ = 256047 snake_case_ = 256145 @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Any = NllbTokenizer A_ : Tuple = NllbTokenizerFast A_ : Union[str, Any] = True A_ : List[str] = True A_ : int = {} def a (self : Any ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __snake_case = NllbTokenizer(a__ , keep_accents=a__ ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : List[Any] ): """simple docstring""" __snake_case = NllbTokenizer(a__ , keep_accents=a__ ) __snake_case = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __snake_case = tokenizer.convert_tokens_to_ids(a__ ) self.assertListEqual( a__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __snake_case = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual( a__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) __snake_case = self.tokenizer_class.from_pretrained(a__ , **a__ ) __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(a__ , a__ ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) 
# Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) # Save tokenizer rust, legacy_format=True __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it save with the same files self.assertSequenceEqual(a__ , a__ ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) # Save tokenizer rust, legacy_format=False __snake_case = tempfile.mkdtemp() __snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ ) __snake_case = tokenizer_p.save_pretrained(a__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __snake_case = tokenizer_r.from_pretrained(a__ ) __snake_case = tokenizer_p.from_pretrained(a__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(a__ , a__ ) ) shutil.rmtree(a__ ) @require_torch def a (self : Optional[Any] ): """simple docstring""" if not self.test_seqaseq: return __snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. __snake_case = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for''' ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons''' ''' will only worsen the violence and misery for millions of people.''', ] __snake_case = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al''' ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi''' ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] try: __snake_case = tokenizer.prepare_seqaseq_batch( src_texts=a__ , tgt_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified __snake_case = tokenizer.prepare_seqaseq_batch( a__ , tgt_texts=a__ , max_length=3 , return_tensors='''pt''' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) __snake_case = tokenizer.prepare_seqaseq_batch( src_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('''decoder_input_ids''' , a__ ) @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' ) def a (self : List[str] ): """simple docstring""" pass def a (self : int ): """simple docstring""" for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = [AddedToken('''<special>''' , lstrip=a__ )] __snake_case = self.rust_tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ ) __snake_case = tokenizer_r.encode('''Hey this is a <special> token''' ) __snake_case = tokenizer_r.encode('''<special>''' , add_special_tokens=a__ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: __snake_case = self.rust_tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ , ) __snake_case = self.tokenizer_class.from_pretrained( a__ , additional_special_tokens=a__ , **a__ ) __snake_case = tokenizer_p.encode('''Hey this is a <special> token''' ) __snake_case = tokenizer_cr.encode('''Hey this is a <special> token''' ) self.assertEqual(a__ , a__ ) self.assertEqual(a__ , a__ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): A_ : List[Any] = 'facebook/nllb-200-distilled-600M' A_ : List[str] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] A_ : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] A_ : List[Any] = [ 256_047, 16_297, 134_408, 8_165, 248_066, 14_734, 950, 1_135, 105_721, 3_573, 83, 27_352, 108, 49_486, 2, ] @classmethod def a (cls : Tuple ): """simple docstring""" __snake_case = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' ) __snake_case = 1 return cls def a (self : Optional[int] ): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_6057 ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a__ ) def a (self : Optional[Any] ): """simple docstring""" self.assertIn(a__ , self.tokenizer.all_special_ids ) # fmt: off __snake_case = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on __snake_case = self.tokenizer.decode(a__ , skip_special_tokens=a__ ) __snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a__ ) self.assertEqual(a__ , a__ ) self.assertNotIn(self.tokenizer.eos_token , a__ ) def a (self : Optional[int] ): """simple docstring""" __snake_case = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , a__ ) __snake_case = 10 __snake_case = self.tokenizer(a__ , max_length=a__ , truncation=a__ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , a__ ) self.assertEqual(len(a__ ) , a__ ) def a 
(self : Tuple ): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_6203, 3] ) def a (self : Any ): """simple docstring""" __snake_case = tempfile.mkdtemp() __snake_case = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(a__ ) __snake_case = NllbTokenizer.from_pretrained(a__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a__ ) @require_torch def a (self : str ): """simple docstring""" __snake_case = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __snake_case = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] ) self.assertIsInstance(a__ , a__ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) __snake_case = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , a__ ) self.assertEqual(a__ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.tokenizer(self.src_text , padding=a__ , truncation=a__ , max_length=3 , return_tensors='''pt''' ) __snake_case = self.tokenizer( text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=10 , return_tensors='''pt''' ) __snake_case = targets['''input_ids'''] __snake_case = shift_tokens_right( a__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a (self : str ): """simple docstring""" __snake_case = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( nested_simplify(a__ ) , { # A, test, EOS, en_XX '''input_ids''': [[25_6047, 70, 7356, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_6057, } , ) @require_torch def a (self : Optional[Any] ): """simple docstring""" __snake_case = True __snake_case = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) __snake_case = False __snake_case = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
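# Hypothetical translation snippet (the model id comes from the test class
# above; everything else is an assumption): src_lang/tgt_lang decide which
# language-code tokens the tokenizer adds around the text.
#
#   tok = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#   )
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # generated ids would then be decoded with tok.batch_decode(..., skip_special_tokens=True)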
24
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
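# Quick check (a sketch; the expected values follow directly from the
# 2 * pi * radius * (angle / 360) formula above):
#
#     >>> round(arc_length(90, 10), 2)   # quarter of a radius-10 circle
#     15.71
#     >>> round(arc_length(360, 1), 2)   # full unit circle, i.e. 2 * pi
#     6.28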
24
1
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py snake_case_ = 'src/transformers' snake_case_ = 'docs/source/en' snake_case_ = '.' def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Dict ) -> int: with open(snake_case_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case = f.readlines() # Find the start prompt. __snake_case = 0 while not lines[start_index].startswith(snake_case_ ): start_index += 1 start_index += 1 __snake_case = start_index while not lines[end_index].startswith(snake_case_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | snake_case_ = 'Model|Encoder|Decoder|ForConditionalGeneration' # Regexes that match TF/Flax/PT model names. snake_case_ = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') snake_case_ = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. snake_case_ = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # This is to make sure the transformers module imported is the one in the repo. snake_case_ = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase__ ( snake_case_ : List[Any] ) -> Union[str, Any]: __snake_case = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , snake_case_ ) return [m.group(0 ) for m in matches] def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ) -> Optional[Any]: __snake_case = 2 if text == '''✅''' or text == '''❌''' else len(snake_case_ ) __snake_case = (width - text_length) // 2 __snake_case = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase__ ( ) -> List[str]: __snake_case = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __snake_case = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __snake_case = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __snake_case = collections.defaultdict(snake_case_ ) __snake_case = collections.defaultdict(snake_case_ ) __snake_case = collections.defaultdict(snake_case_ ) __snake_case = collections.defaultdict(snake_case_ ) __snake_case = collections.defaultdict(snake_case_ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(snake_case_ ): __snake_case = None if attr_name.endswith('''Tokenizer''' ): __snake_case = slow_tokenizers __snake_case = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __snake_case = fast_tokenizers __snake_case = attr_name[:-13] elif _re_tf_models.match(snake_case_ ) is not None: __snake_case = tf_models __snake_case = _re_tf_models.match(snake_case_ ).groups()[0] elif _re_flax_models.match(snake_case_ ) is not None: __snake_case = flax_models __snake_case = _re_flax_models.match(snake_case_ ).groups()[0] elif _re_pt_models.match(snake_case_ ) is not None: __snake_case = pt_models __snake_case = _re_pt_models.match(snake_case_ ).groups()[0] if lookup_dict is not None: while len(snake_case_ ) > 0: if attr_name in model_name_to_prefix.values(): __snake_case = True break # Try again after removing the last word in the name __snake_case = ''''''.join(camel_case_split(snake_case_ )[:-1] ) # Let's build that table! __snake_case = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __snake_case = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __snake_case = [len(snake_case_ ) + 2 for c in columns] __snake_case = max([len(snake_case_ ) for name in model_names] ) + 2 # Build the table per se __snake_case = '''|''' + '''|'''.join([_center_text(snake_case_ , snake_case_ ) for c, w in zip(snake_case_ , snake_case_ )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __snake_case = {True: '''✅''', False: '''❌'''} for name in model_names: __snake_case = model_name_to_prefix[name] __snake_case = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(snake_case_ , snake_case_ ) for l, w in zip(snake_case_ , snake_case_ )] ) + "|\n" return table def lowerCamelCase__ ( snake_case_ : List[str]=False ) -> Optional[int]: __snake_case , __snake_case , __snake_case , __snake_case = _find_text_in_file( filename=os.path.join(snake_case_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) __snake_case = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(snake_case_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') snake_case_ = parser.parse_args() check_model_table(args.fix_and_overwrite)
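# Worked examples for the helpers above (a sketch; the results follow from the
# camel-case regex and the centering arithmetic in _center_text):
#
#     camel_case_split("TFBertModel")   # -> ["TF", "Bert", "Model"]
#     _center_text("Model", 11)         # -> "   Model   "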
24
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[Any] = 'vit_msn' def __init__(self : Union[str, Any] , a__ : Optional[Any]=768 , a__ : Optional[Any]=12 , a__ : Optional[int]=12 , a__ : Optional[int]=3072 , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : int=0.0 , a__ : Optional[Any]=0.0_2 , a__ : List[Any]=1E-06 , a__ : Optional[int]=224 , a__ : str=16 , a__ : Optional[Any]=3 , a__ : int=True , **a__ : List[Any] , ): """simple docstring""" super().__init__(**a__ ) __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = qkv_bias
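# Shape note (a sketch based on the defaults above): a 224x224 image split into
# 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens, each projected to
# hidden_size = 768 before the 12 transformer layers.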
24
1
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
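# Expected behavior (a sketch, not part of the original module): the QFT of the
# all-|0> register is the uniform superposition, so the 10000-shot histogram
# from quantum_fourier_transform(3) should be roughly flat across all eight
# bitstrings, i.e. counts near 1250 for each of '000' through '111'.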
24
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def a (self : int , a__ : List[Any]=0 ): """simple docstring""" __snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) ) __snake_case = np.random.RandomState(a__ ) __snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) # warmup pass to apply optimizations __snake_case = pipe(**self.get_dummy_inputs() ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Any ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = 
np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : List[str] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = ort.SessionOptions() __snake_case = False return options def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) # using the PNDM scheduler by default __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , 
revision='''onnx''' ) __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
24
1
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n) -> float:
    # Greedy fractional knapsack: sort items by value-to-weight ratio,
    # take whole items while they fit, then a fraction of the next one.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
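# Worked example (a sketch assuming frac_knapsack as defined above): with
# values [60, 100, 120], weights [10, 20, 30] and capacity w=50, the greedy
# ratio order takes items 1 and 2 whole plus 2/3 of item 3: 60 + 100 + 80.
#
#     >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
#     240.0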
24
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
24
1
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
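# Behavior note (a sketch): importing this shim re-exports the two pipeline
# names and the deprecate() call above emits a deprecation warning steering
# users to diffusers.pipelines.pipeline_utils; standard_warn=False keeps the
# custom message as written.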
24
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[str] = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'CLIPImageProcessor' A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ): """simple docstring""" __snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) __snake_case = kwargs.pop('''feature_extractor''' ) __snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: __snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: __snake_case = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*a__ , **a__ ) def a (self : Any , *a__ : List[Any] , **a__ : List[str] ): """simple docstring""" return self.tokenizer.decode(*a__ , **a__ ) @property def a (self : int ): """simple docstring""" __snake_case = self.tokenizer.model_input_names __snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
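# Behavior note (a sketch of the __call__ above): text-only input returns the
# tokenizer encoding, image-only input returns the image-processor output
# wrapped in a BatchEncoding, and passing both returns the text encoding with
# the image processor's pixel_values attached.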
24
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case_ = logging.get_logger(__name__) snake_case_ = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase ): A_ : Any = 'nat' A_ : List[str] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self : Optional[int] , a__ : Union[str, Any]=4 , a__ : Optional[int]=3 , a__ : Union[str, Any]=64 , a__ : Optional[Any]=[3, 4, 6, 5] , a__ : Optional[int]=[2, 4, 8, 16] , a__ : List[str]=7 , a__ : str=3.0 , a__ : Dict=True , a__ : Any=0.0 , a__ : int=0.0 , a__ : List[str]=0.1 , a__ : Any="gelu" , a__ : List[str]=0.0_2 , a__ : Optional[int]=1E-5 , a__ : str=0.0 , a__ : List[Any]=None , a__ : List[Any]=None , **a__ : List[str] , ): """simple docstring""" super().__init__(**a__ ) __snake_case = patch_size __snake_case = num_channels __snake_case = embed_dim __snake_case = depths __snake_case = len(a__ ) __snake_case = num_heads __snake_case = kernel_size __snake_case = mlp_ratio __snake_case = qkv_bias __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = drop_path_rate __snake_case = hidden_act __snake_case = layer_norm_eps __snake_case = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case = int(embed_dim * 2 ** (len(a__ ) - 1) ) __snake_case = layer_scale_init_value __snake_case = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(a__ ) + 1 )] __snake_case , __snake_case = get_aligned_output_features_output_indices( out_features=a__ , out_indices=a__ , stage_names=self.stage_names )
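# Shape note (a sketch based on the defaults above): with depths [3, 4, 6, 5]
# the config exposes stage_names = ["stem", "stage1", ..., "stage4"] and a
# final hidden_size of embed_dim * 2 ** 3 = 64 * 8 = 512, as computed above.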
24
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Dict ): """simple docstring""" __snake_case = logging.get_logger() # the current default level is logging.WARNING __snake_case = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a__ ) def a (self : Dict ): """simple docstring""" __snake_case = logging.get_verbosity() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(a__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def a (self : Dict ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() # this action activates the env var __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ ) __snake_case = logging.log_levels[env_level_str] __snake_case = logging.get_verbosity() self.assertEqual( a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __snake_case = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def a (self : List[Any] ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.logging.getLogger() with CaptureLogger(a__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def a (self : Any ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log 
normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) def lowerCamelCase__ ( ) -> str: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
24
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
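# Usage sketch (assuming default_command_parser registers a "default"
# subcommand, as in `accelerate config default`):
#
#     parser = get_config_parser()
#     args = parser.parse_args(["default"])
#     args.func(args)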
24
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[str] = CpmAntTokenizer A_ : Optional[int] = False def a (self : Optional[int] ): """simple docstring""" super().setUp() __snake_case = [ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def a (self : Dict ): """simple docstring""" __snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) __snake_case = '''今天天气真好!''' __snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!'''] __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = '''今天天气真好!''' __snake_case = [tokenizer.bos_token] + tokens __snake_case = [6, 9802, 1_4962, 2082, 831, 244] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) __snake_case = tokenizer.decode(a__ ) self.assertEqual(a__ , a__ )
24
1
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class SCREAMING_SNAKE_CASE__ : def __init__(self : int , a__ : Optional[int] , a__ : List[Any]=13 , a__ : Dict=7 , a__ : str=True , a__ : str=True , a__ : Optional[int]=False , a__ : Optional[int]=True , a__ : Union[str, Any]=99 , a__ : Optional[int]=32 , a__ : Optional[int]=5 , a__ : Optional[Any]=4 , a__ : str=37 , a__ : Union[str, Any]="gelu" , a__ : int=0.1 , a__ : List[Any]=0.1 , a__ : Dict=512 , a__ : Optional[int]=16 , a__ : List[Any]=2 , a__ : str=0.0_2 , a__ : int=3 , a__ : Tuple=4 , a__ : Optional[Any]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def a (self : List[Any] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = ids_tensor([self.batch_size] , self.num_choices ) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a (self : str ): """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , ) def a (self : str , a__ : int , a__ : str , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] ): """simple docstring""" __snake_case = LlamaModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ ) __snake_case = model(a__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a (self : Optional[Any] , a__ : str , a__ : List[Any] , a__ : Dict , a__ : Any , a__ : Tuple , a__ : Optional[int] , a__ : List[str] , a__ : Optional[Any] , a__ : Dict , ): """simple docstring""" __snake_case = True __snake_case = LlamaModel(a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , ) __snake_case = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , ) __snake_case = model(a__ , attention_mask=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a (self : Any , a__ : Union[str, Any] , a__ : Tuple , a__ : Tuple , a__ : int , a__ : int , a__ : str , a__ : int , a__ : List[Any] , a__ : List[Any] , ): """simple docstring""" __snake_case = LlamaForCausalLM(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a (self : int , a__ : str , a__ : Tuple , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Any , a__ : str , a__ : Optional[int] , a__ : Union[str, Any] , a__ : int , ): """simple docstring""" __snake_case = True __snake_case = True __snake_case = LlamaForCausalLM(config=a__ ) model.to(a__ ) model.eval() # first forward pass __snake_case = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case = torch.cat([input_mask, next_mask] , dim=-1 ) __snake_case = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )['''hidden_states'''][0] __snake_case = model( a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )['''hidden_states'''][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1E-3 ) ) def a (self : Dict ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () A_ : List[str] = (LlamaForCausalLM,) if is_torch_available() else () A_ : Dict = ( { 'feature-extraction': LlamaModel, 'text-classification': LlamaForSequenceClassification, 'text-generation': 
LlamaForCausalLM, 'zero-shot': LlamaForSequenceClassification, } if is_torch_available() else {} ) A_ : Any = False A_ : Union[str, Any] = False def a (self : Optional[Any] ): """simple docstring""" __snake_case = LlamaModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : List[str] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) def a (self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict['''input_ids'''] __snake_case = input_ids.ne(1 ).to(a__ ) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = '''single_label_classification''' __snake_case = input_dict['''input_ids'''] __snake_case = input_ids.ne(1 ).to(a__ ) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a (self : List[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = '''multi_label_classification''' __snake_case = input_dict['''input_ids'''] __snake_case = input_ids.ne(1 ).to(a__ ) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case = LlamaForSequenceClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , labels=a__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def a (self : Optional[Any] , a__ : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ids_tensor([1, 10] , config.vocab_size ) __snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __snake_case = LlamaModel(a__ ) original_model.to(a__ ) original_model.eval() __snake_case = original_model(a__ ).last_hidden_state __snake_case = original_model(a__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models 
get the same random weights __snake_case = {'''type''': scaling_type, '''factor''': 1_0.0} __snake_case = LlamaModel(a__ ) scaled_model.to(a__ ) scaled_model.eval() __snake_case = scaled_model(a__ ).last_hidden_state __snake_case = scaled_model(a__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def a (self : Tuple ): """simple docstring""" __snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338] __snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) __snake_case = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __snake_case = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def a (self : str ): """simple docstring""" __snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338] __snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) __snake_case = model(torch.tensor(a__ ) ) # Expected mean on dim = -1 __snake_case = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def a (self : Any ): """simple docstring""" __snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338] __snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) __snake_case = model(torch.tensor(a__ ) ) # Expected mean on dim = -1 __snake_case = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) 
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def a (self : Optional[int] ): """simple docstring""" __snake_case = [1, 306, 4658, 278, 6593, 310, 2834, 338] __snake_case = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) __snake_case = model(torch.tensor(a__ ) ) __snake_case = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 ) # fmt: off __snake_case = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def a (self : List[str] ): """simple docstring""" __snake_case = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' __snake_case = '''Simply put, the theory of relativity states that ''' __snake_case = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) __snake_case = tokenizer.encode(a__ , return_tensors='''pt''' ) __snake_case = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=a__ ) # greedy generation outputs __snake_case = model.generate(a__ , max_new_tokens=64 , top_p=a__ , temperature=1 , do_sample=a__ ) __snake_case = tokenizer.decode(generated_ids[0] , skip_special_tokens=a__ ) self.assertEqual(a__ , a__ )
24
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class SCREAMING_SNAKE_CASE__ : def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ): """simple docstring""" __snake_case = parent __snake_case = out_indices if out_indices is not None else [4] __snake_case = stage_names __snake_case = out_features __snake_case = backbone __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = use_pretrained_backbone __snake_case = is_training def a (self : Union[str, Any] ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = self.get_config() return config, pixel_values def a (self : Any ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def a (self : List[Any] , a__ : int , a__ : int ): """simple docstring""" __snake_case = TimmBackbone(config=a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(a__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def a (self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else () A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : List[Any] = False A_ : Dict = False A_ : Any = False A_ : List[Any] = False def a (self : Tuple ): """simple docstring""" __snake_case = TimmBackboneModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ ) def a (self : Any ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : int ): """simple docstring""" __snake_case = '''resnet18''' __snake_case = '''microsoft/resnet-18''' __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ ) __snake_case = AutoBackbone.from_pretrained(a__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) 
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] ) __snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def a (self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def a (self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : Optional[int] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : int ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def a (self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def a (self : List[Any] ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def a (self : Tuple ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a (self : Tuple ): """simple docstring""" pass def a (self : Tuple ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : Dict ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True __snake_case = 
self.has_attentions # no need to test all models as different heads yield the same functionality __snake_case = self.all_model_classes[0] __snake_case = model_class(a__ ) model.to(a__ ) __snake_case = self._prepare_for_class(a__ , a__ ) __snake_case = model(**a__ ) __snake_case = outputs[0][-1] # Encoder-/Decoder-only models __snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=a__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def a (self : Optional[int] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __snake_case = copy.deepcopy(a__ ) __snake_case = None __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __snake_case = copy.deepcopy(a__ ) __snake_case = False __snake_case = model_class(a__ ) model.to(a__ ) model.eval() __snake_case = model(**a__ )
24
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: snake_case_ = None snake_case_ = logging.get_logger(__name__) snake_case_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} snake_case_ = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } snake_case_ = { 'camembert-base': 512, } snake_case_ = '▁' class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : str = VOCAB_FILES_NAMES A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP A_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[int] = ['input_ids', 'attention_mask'] A_ : Union[str, Any] = CamembertTokenizer def __init__(self : List[Any] , a__ : Tuple=None , a__ : Dict=None , a__ : List[str]="<s>" , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="</s>" , a__ : int="<s>" , a__ : Optional[Any]="<unk>" , a__ : Optional[int]="<pad>" , a__ : Optional[Any]="<mask>" , a__ : Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , **a__ : Any , ): """simple docstring""" __snake_case = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token super().__init__( a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , additional_special_tokens=a__ , **a__ , ) __snake_case = vocab_file __snake_case = False if not self.vocab_file else True def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case = [self.cls_token_id] __snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a (self : Dict , a__ : List[int] , a__ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a (self : str , a__ : str , a__ : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case = os.path.join( a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ): copyfile(self.vocab_file , a__ ) return (out_vocab_file,)
24
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
24
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
24
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
24
1
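# A minimal companion server sketch for the file-transfer client above. The
# port (12312) and greeting mirror the client; the file name "File_to_send"
# is an assumption, not part of the original sample.
import socket


def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    print(conn.recv(1024).decode())  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing the socket signals EOF and ends the client's recv loop
    server.close()


if __name__ == "__main__":
    serve_file()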
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Dict ): """simple docstring""" __snake_case = logging.get_logger() # the current default level is logging.WARNING __snake_case = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a__ ) def a (self : Dict ): """simple docstring""" __snake_case = logging.get_verbosity() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a__ ) as cl: logger.warning(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(a__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def a (self : Dict ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() # this action activates the env var __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ ) __snake_case = logging.log_levels[env_level_str] __snake_case = logging.get_verbosity() self.assertEqual( a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __snake_case = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def a (self : List[Any] ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.logging.getLogger() with CaptureLogger(a__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def a (self : Any ): """simple docstring""" transformers.utils.logging._reset_library_root_logger() __snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) __snake_case = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log 
normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a__ ) as cl: logger.warning_advice(a__ ) self.assertEqual(cl.out , msg + '''\n''' ) def lowerCamelCase__ ( ) -> str: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
24
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
1
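# Worked examples for longest_subsequence above, hand-traced against the
# recursion (the helper explores the first smaller-than-pivot branch plus the
# keep-the-pivot branch and returns the longer result):
print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]
print(longest_subsequence([1, 1, 1]))  # [1, 1, 1] -- ties count as non-decreasing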
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
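# A worked example for fractional_knapsack above: value-to-weight ratios are
# 6, 5 and 4, so the greedy order takes items 0 and 1 whole and then two
# thirds of item 2 to fill the remaining capacity of 20.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.6666666666666666]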
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = scope __snake_case = range_bbox def a (self : Optional[int] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case = bbox[i, j, 3] __snake_case = bbox[i, j, 1] __snake_case = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case = bbox[i, j, 2] __snake_case = bbox[i, j, 0] __snake_case = t __snake_case = None if self.use_input_mask: __snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def a (self : List[str] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a (self : List[Any] , a__ : 
List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ): """simple docstring""" __snake_case = LiltModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ , token_type_ids=a__ ) __snake_case = model(a__ , bbox=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ): """simple docstring""" __snake_case = self.num_labels __snake_case = LiltForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ): """simple docstring""" __snake_case = LiltForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) A_ : Any = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) A_ : Optional[int] = False A_ : List[Any] = False def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" return True def a (self : Union[str, Any] ): """simple docstring""" __snake_case = LiltModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a__ ) def a (self : Union[str, Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) @slow def a (self : Optional[int] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = LiltModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Tuple ): """simple docstring""" __snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ ) __snake_case = torch.tensor([[1, 2]] , device=a__ ) __snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ ) # forward pass with torch.no_grad(): __snake_case = model(input_ids=a__ , bbox=a__ ) __snake_case = torch.Size([1, 2, 768] ) __snake_case = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , ) self.assertTrue(outputs.last_hidden_state.shape , a__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
24
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowercase_ ( lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ShapEPipeline __snake_case = ['''prompt'''] __snake_case = ['''prompt'''] __snake_case = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] __snake_case = False @property def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" return 32 @property def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" return 32 @property def __lowerCAmelCase ( self : Any ) ->Tuple: """simple docstring""" return self.time_input_dim * 4 @property def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" return 8 @property def __lowerCAmelCase ( self : Tuple ) ->str: """simple docstring""" a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" torch.manual_seed(0 ) a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(__UpperCAmelCase ) @property def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) a = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } a = PriorTransformer(**__UpperCAmelCase ) return model @property def __lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" torch.manual_seed(0 ) a = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } a = ShapERenderer(**__UpperCAmelCase ) return model def __lowerCAmelCase ( self : List[Any] ) ->Any: """simple docstring""" a = self.dummy_prior a = self.dummy_text_encoder a = self.dummy_tokenizer a = self.dummy_renderer a = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , ) a = { '''prior''': prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] 
, __UpperCAmelCase : str=0 ) ->Optional[int]: """simple docstring""" if str(__UpperCAmelCase ).startswith('''mps''' ): a = torch.manual_seed(__UpperCAmelCase ) else: a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) a = { '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self : Dict ) ->Optional[int]: """simple docstring""" a = '''cpu''' a = self.get_dummy_components() a = self.pipeline_class(**__UpperCAmelCase ) a = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) a = output.images[0] a = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) a = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Dict ) ->Optional[Any]: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" a = torch_device == '''cpu''' a = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , ) def __lowerCAmelCase ( self : str ) ->Optional[int]: """simple docstring""" a = self.get_dummy_components() a = self.pipeline_class(**__UpperCAmelCase ) a = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) a = 1 a = 2 a = self.get_dummy_inputs(__UpperCAmelCase ) for key in inputs.keys(): if key in self.batch_params: a = batch_size * [inputs[key]] a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : int ) ->Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" a = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) a = ShapEPipeline.from_pretrained('''openai/shap-e''' ) a = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) a = pipe( '''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
0
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def a (*a__ : List[str] , **a__ : List[str] ): """simple docstring""" pass def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. snake_case_ = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ ) __snake_case = INVOICE_URL __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) __snake_case = '''What is the placebo?''' __snake_case = [ { '''image''': load_image(a__ ), '''question''': question, }, { '''image''': image, '''question''': question, }, { '''image''': image, '''question''': question, '''word_boxes''': word_boxes, }, ] return dqa_pipeline, examples def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ): """simple docstring""" __snake_case = dqa_pipeline(a__ , top_k=2 ) self.assertEqual( a__ , [ [ {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, {'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def a (self : Dict ): """simple docstring""" __snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' ) __snake_case = INVOICE_URL __snake_case = '''How many cats are there?''' __snake_case = [ {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39}, {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40}, ] __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(a__ , [] ) # We can optionnally pass directly the words and bounding boxes __snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case = [] __snake_case = [] __snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 ) self.assertEqual(a__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : str ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def a (self : List[Any] ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Tuple ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , 
revision='''3dc6de3''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def a (self : Dict ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ ) __snake_case = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) __snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) @slow @require_torch def a (self : Tuple ): """simple docstring""" __snake_case = pipeline( '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , 
feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , ) __snake_case = INVOICE_URL __snake_case = '''What is the invoice number?''' __snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 ) self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] ) @require_tf @unittest.skip('''Document question answering not implemented in TF''' ) def a (self : List[str] ): """simple docstring""" pass
24
0
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
1
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
24
0
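# Expected behaviour of the backtracking solver above for a different target:
# with max_sum = 7 the state-space search keeps only [3, 4] and [5, 2], and the
# (remaining_nums_sum + sum(path)) < max_sum test prunes dead branches early.
print(*generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 7))  # [3, 4] [5, 2]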
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
2
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = num_stages __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_labels __snake_case = initializer_range __snake_case = out_features __snake_case = out_indices __snake_case = scope def a (self : Dict ): """simple docstring""" __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels ) __snake_case = self.get_config() return config, pixel_values, labels def a (self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ): """simple docstring""" __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case = None __snake_case = ConvNextBackbone(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a (self : Tuple ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A_ : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A_ : Dict = True A_ : Optional[Any] = False A_ : int = False A_ : int = False A_ : List[str] = False def a (self : List[str] ): """simple docstring""" __snake_case = ConvNextModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 ) def a (self : Tuple ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a (self : str ): """simple docstring""" return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def a (self : int ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def a (self : Dict ): """simple docstring""" pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Optional[Any] ): """simple docstring""" __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(a__ ) __snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , a__ ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a__ ) def a (self : Dict ): """simple docstring""" def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ): __snake_case = model_class(a__ ) model.to(a__ ) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(a__ , a__ ) ) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(a__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(a__ , a__ , a__ ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a__ ) @slow def a (self : Any ): """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = ConvNextModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) def lowerCamelCase__ ( ) -> List[str]: __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def a (self : Tuple ): """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def a (self : Optional[Any] ): """simple docstring""" __snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ ) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ ) # forward pass with torch.no_grad(): __snake_case = model(**a__ ) # verify the logits __snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a__ ) __snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ): A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A_ : List[Any] = ConvNextConfig A_ : Optional[Any] = False def a (self : Optional[int] ): """simple docstring""" __snake_case = ConvNextModelTester(self )
24
0
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
3
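# Hypothetical usage of the release helper above (the script file name is an
# assumption; the docs path must exist relative to the working directory):
#
#   python update_custom_js.py --version 4.30.0
#
# or, programmatically: update_custom_js("4.30.0"), which pins
# `const stableVersion = "v4.30.0"` and appends "v4.30.0" to the version map.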
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
24
0
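# The comprehension above solves Project Euler problem 9: find the Pythagorean
# triple with a + b + c = 1000 and return a * b * c. A quick check of the
# unique answer (200, 375, 425):
a, b = 200, 375
c = 1000 - a - b
assert a * a + b * b == c * c  # 40000 + 140625 == 180625 == 425 ** 2
assert a * b * c == 31875000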
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
4
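# A worked example for heaps above; Heap's algorithm emits all n! orderings,
# swapping elements in place between recursive calls:
print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]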
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : List[Any] = BartphoTokenizer A_ : List[str] = False A_ : Optional[Any] = True def a (self : Tuple ): """simple docstring""" super().setUp() __snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def a (self : str , **a__ : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : str , a__ : Any ): """simple docstring""" __snake_case = '''This is a là test''' __snake_case = '''This is a<unk><unk> test''' return input_text, output_text def a (self : Dict ): """simple docstring""" __snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case = '''This is a là test''' __snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split() __snake_case = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
24
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
5
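# A minimal usage sketch for the configuration above, assuming a transformers
# version that ships FocalNet; the keyword values shown are the tiny-variant
# defaults already present in the signature.
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
model = FocalNetModel(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']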
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A : Dict = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Any = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from math import log2


def lowerCamelCase__ ( a : int ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (a == 0) else int(log2(a & -a ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
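# Worked example for the trailing-zero count above (standalone): `a & -a`
# isolates the lowest set bit, and log2 of that power of two gives its
# index, which equals the number of trailing zeros.
from math import log2

assert (36 & -36) == 4            # 36 == 0b100100; lowest set bit is 0b100
assert int(log2(36 & -36)) == 2   # 0b100100 has two trailing zeros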
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
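# Sanity check (standalone): 4150 equals the sum of the fifth powers of its
# digits, since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert sum(int(d) ** 5 for d in "4150") == 4150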
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING snake_case_ = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ): """simple docstring""" super().__init__(*a__ , **a__ ) requires_backends(self , '''vision''' ) self.check_model_type(a__ ) def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ): """simple docstring""" return super().__call__(a__ , **a__ ) def a (self : Dict , **a__ : Any ): """simple docstring""" return {}, {}, {} def a (self : List[str] , a__ : Any ): """simple docstring""" __snake_case = load_image(a__ ) __snake_case = image.size __snake_case = self.image_processor(images=a__ , return_tensors=self.framework ) return model_inputs def a (self : int , a__ : List[Any] ): """simple docstring""" __snake_case = self.model(**a__ ) return model_outputs def a (self : int , a__ : str ): """simple docstring""" __snake_case = model_outputs.predicted_depth __snake_case = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ ) __snake_case = prediction.squeeze().cpu().numpy() __snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' ) __snake_case = Image.fromarray(a__ ) __snake_case = {} __snake_case = predicted_depth __snake_case = depth return output_dict
def __SCREAMING_SNAKE_CASE (a ):
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(a ).count('''1''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
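# Equivalent standalone check: bin() prepends '0b', which contains no '1',
# so counting '1' characters in the string is a valid popcount for
# non-negative integers.
assert bin(25) == "0b11001"
assert bin(25).count("1") == 3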
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> Any: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join __snake_case = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> Any: assert _test_patching.open is open __snake_case = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , snake_case_ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> List[str]: # pandas.read_csv is not present in _test_patching __snake_case = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ): pass def lowerCamelCase__ ( ) -> Union[str, Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point __snake_case = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , snake_case_ ) is None with patch_submodule(_test_patching , '''len''' , snake_case_ ): assert _test_patching.len is mock assert _test_patching.len is len def 
lowerCamelCase__ ( ) -> Union[str, Any]: __snake_case = '''__test_patch_submodule_start_and_stop_mock__''' __snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join __snake_case = '''__test_patch_submodule_successive_join__''' __snake_case = '''__test_patch_submodule_successive_dirname__''' __snake_case = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> Tuple: __snake_case = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ): pass
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : List[Any] = [randint(-1000 , 1000 ) for i in range(10 )] __SCREAMING_SNAKE_CASE : Tuple = randint(-5000 , 5000 ) return (arr, r) __lowerCAmelCase : List[Any] =make_dataset() def _UpperCamelCase ( lowercase__ , lowercase__ ): for triplet in permutations(lowercase__ , 3 ): if sum(lowercase__ ) == target: return tuple(sorted(lowercase__ ) ) return (0, 0, 0) def _UpperCamelCase ( lowercase__ , lowercase__ ): arr.sort() __SCREAMING_SNAKE_CASE : Any = len(lowercase__ ) for i in range(n - 1 ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Optional[int] = ''' from __main__ import dataset, triplet_sum1, triplet_sum2 ''' __SCREAMING_SNAKE_CASE : List[str] = ''' triplet_sum1(*dataset) ''' __SCREAMING_SNAKE_CASE : Any = ''' triplet_sum2(*dataset) ''' __SCREAMING_SNAKE_CASE : Any = repeat(setup=lowercase__ , stmt=lowercase__ , repeat=5 , number=10000 ) __SCREAMING_SNAKE_CASE : List[str] = repeat(setup=lowercase__ , stmt=lowercase__ , repeat=5 , number=10000 ) return (min(lowercase__ ), min(lowercase__ )) if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : List[Any] =solution_times() print(f"""The time for naive implementation is {times[0]}.""") print(f"""The time for optimized implementation is {times[1]}.""")
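# The sample above benchmarks a brute-force permutation search against the
# classic sort-plus-two-pointers triplet search. As a minimal standalone
# sketch (identifier names here are illustrative, not from the sample), the
# two-pointer variant looks like this:
def triplet_sum_two_pointer(arr: list, target: int) -> tuple:
    arr = sorted(arr)
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)


assert triplet_sum_two_pointer([13, 29, 7, 23, 5], 35) == (5, 7, 23)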
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A_ : Optional[str] = field( default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} ) A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} ) @dataclass class SCREAMING_SNAKE_CASE__ : A_ : str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) A_ : Optional[str] = field( default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , ) A_ : Optional[int] = field( default=1_024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total sequence length for target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for validation target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded. ' 'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ' 'during ``evaluate`` and ``predict``.' ) } , ) A_ : Optional[int] = field( default=142 , metadata={ 'help': ( 'The maximum total sequence length for test target text after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} ) A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. 
-1 means use all.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} ) A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} ) A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} ) A_ : bool = field( default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , ) def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) ) def lowerCamelCase__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() check_output_dir(snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(snake_case_ , snake_case_ , snake_case_ ): assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(snake_case_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(snake_case_ , snake_case_ ): __snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(snake_case_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case = SeqaSeqDataset # Get datasets __snake_case = ( dataset_class( snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case = ( dataset_class( snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case = ( build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None ) __snake_case = SeqaSeqTrainer( model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator( snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , ) __snake_case = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __snake_case = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case = train_result.metrics __snake_case = data_args.n_train trainer.save_model() # this also saves the 
tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case = trainer.evaluate(metric_key_prefix='''val''' ) __snake_case = data_args.n_val __snake_case = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' ) __snake_case = test_output.metrics __snake_case = data_args.n_test if trainer.is_world_process_zero(): __snake_case = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , snake_case_ , training_args.output_dir ) all_metrics.update(snake_case_ ) if training_args.predict_with_generate: __snake_case = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ ) __snake_case = lmap(str.strip , snake_case_ ) write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
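# Assumed command-line usage via the fire wrapper above (the script and file
# names are hypothetical): extra --key value flags are forwarded to
# calculate_rouge through **kwargs.
#
#   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json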
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
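# Worked example (standalone): a 90-degree arc sweeps a quarter of the full
# circumference, so with radius 10 the length is 2 * pi * 10 / 4 = 5 * pi.
from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)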
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "gptj" __SCREAMING_SNAKE_CASE = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __lowerCamelCase=5_0_4_0_0 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=2_8 , __lowerCamelCase=1_6 , __lowerCamelCase=6_4 , __lowerCamelCase=None , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0_2 , __lowerCamelCase=True , __lowerCamelCase=5_0_2_5_6 , __lowerCamelCase=5_0_2_5_6 , __lowerCamelCase=False , **__lowerCamelCase , ) -> Any: _A : Dict = vocab_size _A : Any = n_positions _A : List[str] = n_embd _A : Optional[int] = n_layer _A : str = n_head _A : Union[str, Any] = n_inner _A : List[Any] = rotary_dim _A : int = activation_function _A : Dict = resid_pdrop _A : int = embd_pdrop _A : int = attn_pdrop _A : Tuple = layer_norm_epsilon _A : List[Any] = initializer_range _A : Dict = use_cache _A : Any = bos_token_id _A : Optional[int] = eos_token_id super().__init__( bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase) class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase = "default" , __lowerCamelCase = None , __lowerCamelCase = False , ) -> Dict: super().__init__(__lowerCamelCase , task=__lowerCamelCase , patching_specs=__lowerCamelCase , use_past=__lowerCamelCase) if not getattr(self._config , "pad_token_id" , __lowerCamelCase): # TODO: how to do that better? 
_A : Dict = 0 @property def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]: _A : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs") _A : List[Any] = {0: "batch", 1: "past_sequence + sequence"} else: _A : Dict = {0: "batch", 1: "sequence"} return common_inputs @property def _lowerCamelCase ( self) -> int: return self._config.n_layer @property def _lowerCamelCase ( self) -> int: return self._config.n_head def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]: _A : Tuple = super(__lowerCamelCase , self).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase) # We need to order the input in the way they appears in the forward() _A : Union[str, Any] = OrderedDict({"input_ids": common_inputs["input_ids"]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch _A , _A : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values _A : Optional[int] = seqlen + 2 _A : Optional[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _A : List[str] = [ (torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)) for _ in range(self.num_layers) ] _A : List[Any] = common_inputs["attention_mask"] if self.use_past: _A : str = ordered_inputs["attention_mask"].dtype _A : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase)] , dim=1) return ordered_inputs @property def _lowerCamelCase ( self) -> int: return 1_3
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[Any] = 'vit_msn' def __init__(self : Union[str, Any] , a__ : Optional[Any]=768 , a__ : Optional[Any]=12 , a__ : Optional[int]=12 , a__ : Optional[int]=3072 , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : int=0.0 , a__ : Optional[Any]=0.0_2 , a__ : List[Any]=1E-06 , a__ : Optional[int]=224 , a__ : str=16 , a__ : Optional[Any]=3 , a__ : int=True , **a__ : List[Any] , ): """simple docstring""" super().__init__(**a__ ) __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = image_size __snake_case = patch_size __snake_case = num_channels __snake_case = qkv_bias
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Dict = ShapEPipeline UpperCAmelCase__ : Any = ['prompt'] UpperCAmelCase__ : List[Any] = ['prompt'] UpperCAmelCase__ : str = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] UpperCAmelCase__ : Dict = False @property def lowerCAmelCase__ ( self: int ): return 32 @property def lowerCAmelCase__ ( self: Union[str, Any] ): return 32 @property def lowerCAmelCase__ ( self: int ): return self.time_input_dim * 4 @property def lowerCAmelCase__ ( self: Optional[Any] ): return 8 @property def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCAmelCase__ ( self: str ): torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(UpperCamelCase_ ) @property def lowerCAmelCase__ ( self: List[str] ): torch.manual_seed(0 ) __lowerCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } __lowerCamelCase = PriorTransformer(**UpperCamelCase_ ) return model @property def lowerCAmelCase__ ( self: Dict ): torch.manual_seed(0 ) __lowerCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**UpperCamelCase_ ) return model def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __lowerCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCAmelCase__ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=0 ): if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = 
torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = """cpu""" __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase__ ( self: List[str] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = torch_device == """cpu""" __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) __lowerCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) __lowerCamelCase = pipe( """a shark""" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def a (self : int , a__ : List[Any]=0 ): """simple docstring""" __snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) ) __snake_case = np.random.RandomState(a__ ) __snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) # warmup pass to apply optimizations __snake_case = pipe(**self.get_dummy_inputs() ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Any ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = 
np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : Dict ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def a (self : List[str] ): """simple docstring""" __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) __snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = self.get_dummy_inputs() __snake_case = pipe(**a__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def a (self : List[str] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = ort.SessionOptions() __snake_case = False return options def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) # using the PNDM scheduler by default __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def a (self : Dict ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __snake_case = init_image.resize((768, 512) ) __snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , 
revision='''onnx''' ) __snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A fantasy landscape, trending on artstation''' __snake_case = np.random.RandomState(0 ) __snake_case = pipe( prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , ) __snake_case = output.images __snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : List[str] _UpperCAmelCase : Optional[str] = None # Automatically constructed _UpperCAmelCase : ClassVar[str] = "dict" _UpperCAmelCase : ClassVar[Any] = None _UpperCAmelCase : str = field(default='''Translation''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def __call__( self : str): return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def _SCREAMING_SNAKE_CASE ( self : Tuple): from .features import Value return {k: Value("string") for k in sorted(self.languages)} @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : Optional[List] = None _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[str] = None # Automatically constructed _UpperCAmelCase : ClassVar[str] = "dict" _UpperCAmelCase : ClassVar[Any] = None _UpperCAmelCase : str = field(default='''TranslationVariableLanguages''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: List[str] = sorted(set(self.languages)) if self.languages else None SCREAMING_SNAKE_CASE_: Tuple = len(self.languages) if self.languages else None def __call__( self : Any): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = set(self.languages) if self.languages and set(lowerCAmelCase__) - lang_set: raise ValueError( F"Some languages in example ({', '.join(sorted(set(lowerCAmelCase__) - lang_set))}) are not in valid set ({', '.join(lowerCAmelCase__)}).") # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE_: Tuple = [] for lang, text in translation_dict.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = zip(*sorted(lowerCAmelCase__)) return {"language": languages, "translation": translations} def _SCREAMING_SNAKE_CASE ( self : List[str]): from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), }
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=10 ) -> int: """simple docstring""" A__ = [] for _ in range(lowercase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=10 ) -> List[str]: """simple docstring""" A__ = [] for step in range(lowercase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(lowercase_ , '''schedule.bin''' ) torch.save(scheduler.state_dict() , lowercase_ ) A__ = torch.load(lowercase_ ) scheduler.load_state_dict(lowercase_ ) return lrs @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->Optional[int]: '''simple docstring''' self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__)) for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]: '''simple docstring''' A__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__) A__ = torch.tensor([0.4, 0.2, -0.5]) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0) for _ in range(100): A__ = criterion(UpperCAmelCase__ , UpperCAmelCase__) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2) def SCREAMING_SNAKE_CASE ( self : str) ->Tuple: '''simple docstring''' A__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__) A__ = torch.tensor([0.4, 0.2, -0.5]) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase__ , weight_decay=0.0 , relative_step=UpperCAmelCase__ , scale_parameter=UpperCAmelCase__ , warmup_init=UpperCAmelCase__ , ) for _ in range(1_000): A__ = criterion(UpperCAmelCase__ , UpperCAmelCase__) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCAmelCase__ = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None UpperCAmelCase__ = 10 def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=None) ->Any: '''simple docstring''' self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__)) for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__ , msg=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ = data A__ = scheduler_func(self.optimizer , **UpperCAmelCase__) self.assertEqual(len([scheduler.get_lr()[0]]) , 1) A__ = unwrap_schedule(UpperCAmelCase__ , self.num_steps) self.assertListAlmostEqual( UpperCAmelCase__ , UpperCAmelCase__ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) A__ = scheduler_func(self.optimizer , **UpperCAmelCase__) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase__) # wrap to test picklability of the schedule A__ = unwrap_and_save_reload_schedule(UpperCAmelCase__ , self.num_steps) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ , msg=f"""failed for {scheduler_func} in save and reload""") class UpperCamelCase_ : '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : int) ->Tuple: '''simple docstring''' A__ = fn def __call__( self : Optional[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict) ->List[str]: '''simple docstring''' return self.fn(*UpperCAmelCase__ , **UpperCAmelCase__) @classmethod def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str) ->str: '''simple docstring''' A__ = list(map(self , scheduler.lr_lambdas))
14
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
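A quick usage sketch for the processor above (illustrative only; the checkpoint name and image path are placeholders, not taken from the original file):

# Illustrative sketch: checkpoint name and image path are hypothetical.
from PIL import Image

processor = AltCLIPProcessor.from_pretrained("some-org/some-altclip-checkpoint")
inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# `inputs` combines the tokenizer outputs (input_ids, attention_mask) with pixel_values.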
24
0
def heaps(arr: list) -> list:
    """Return all permutations of a list, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
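A quick sanity check for Heap's algorithm (illustrative snippet, not part of the original file): it should generate exactly the n! permutations that itertools produces.

import itertools

perms = heaps([1, 2, 3])
assert sorted(perms) == sorted(itertools.permutations([1, 2, 3]))
print(len(perms))  # 6 == 3!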
15
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _a = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
17
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
24
0
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to a drawable 2D point via simple perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Angle conversion used by the original implementation.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
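Illustrative usage of the two helpers together (a snippet, not part of the original file): rotate a point about the y axis, then project the result onto 2D.

x, y, z = rotate(1.0, 2.0, 3.0, "y", 90.0)
print(convert_to_2d(x, y, z, scale=10.0, distance=10.0))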
18
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    # `get_imports` should only report top-level imports that are not guarded
    # by try/except, so every case above parses to just ["os"].
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
24
0
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
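The point of comparing via logarithms is that log10(x^y) = y * log10(x) stays small even when x^y itself would be astronomically large. A worked example (illustrative, not part of the original file):

import math

# Compare 2^1000 and 3^600 without materializing either power.
log_a = 1000 * math.log10(2)  # ~301.03
log_b = 600 * math.log10(3)   # ~286.27
print("2^1000 is larger" if log_a > log_b else "3^600 is larger")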
19
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
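The client above assumes a server on the same host that accepts the connection, reads the greeting, streams a file back, and closes the socket to signal end-of-file. A minimal companion server might look like this (illustrative sketch only; the filename and chunk size are assumptions, not taken from the original):

import socket


def serve_file(filename="File_to_send", port=12312):
    # One-shot file server matching the client above (illustrative sketch).
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print("Connected to", addr)
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing the connection signals EOF to the client's recv loop
    server.close()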
24
0