Dataset schema (five columns per row):

| column                  | dtype  | observed values      |
|-------------------------|--------|----------------------|
| code                    | string | lengths 87 to 55.2k  |
| code_codestyle          | int64  | 0 to 349             |
| style_context           | string | lengths 135 to 49.1k |
| style_context_codestyle | int64  | 0 to 349             |
| label                   | int64  | 0 to 1               |
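The rows below follow this schema in order: `code`, `code_codestyle`, `style_context`, `style_context_codestyle`, `label`. A minimal sketch of how a dataset with this schema could be streamed and inspected with the `datasets` library; the repository id `org/code-style-pairs` is a hypothetical placeholder, since the dump does not name the dataset:

```python
from datasets import load_dataset

# "org/code-style-pairs" is a hypothetical placeholder; substitute the real
# repository id of the dataset this preview was taken from.
ds = load_dataset("org/code-style-pairs", split="train", streaming=True)

for row in ds.take(2):
    # Each row pairs a `code` snippet with a `style_context` snippet; the two
    # *_codestyle integers (0-349) appear to index the coding style of each
    # snippet, and `label` is a binary target, matching the schema above.
    print(len(row["code"]), row["code_codestyle"],
          len(row["style_context"]), row["style_context_codestyle"],
          row["label"])
```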
Row 1

code (code_codestyle = 21):

```python
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


SCREAMING_SNAKE_CASE : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

SCREAMING_SNAKE_CASE : List[str] = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

SCREAMING_SNAKE_CASE : List[str] = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

SCREAMING_SNAKE_CASE : Tuple = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class _lowerCamelCase( _a ):
    lowercase_ : int = VOCAB_FILES_NAMES
    lowercase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Any = PRETRAINED_INIT_CONFIGURATION
    lowercase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : str = ElectraTokenizer

    def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase="[UNK]", lowerCamelCase="[SEP]", lowerCamelCase="[PAD]", lowerCamelCase="[CLS]", lowerCamelCase="[MASK]", lowerCamelCase=True, lowerCamelCase=None, **lowerCamelCase, ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            lowerCamelCase,
            tokenizer_file=lowerCamelCase,
            do_lower_case=lowerCamelCase,
            unk_token=lowerCamelCase,
            sep_token=lowerCamelCase,
            pad_token=lowerCamelCase,
            cls_token=lowerCamelCase,
            mask_token=lowerCamelCase,
            tokenize_chinese_chars=lowerCamelCase,
            strip_accents=lowerCamelCase,
            **lowerCamelCase,
        )

        _lowercase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', lowerCamelCase) != do_lower_case
            or normalizer_state.get('strip_accents', lowerCamelCase) != strip_accents
            or normalizer_state.get('handle_chinese_chars', lowerCamelCase) != tokenize_chinese_chars
        ):
            _lowercase : Dict = getattr(lowerCamelCase, normalizer_state.pop('type'))
            _lowercase : Optional[int] = do_lower_case
            _lowercase : Tuple = strip_accents
            _lowercase : str = tokenize_chinese_chars
            _lowercase : Optional[Any] = normalizer_class(**lowerCamelCase)

        _lowercase : Any = do_lower_case

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
        """simple docstring"""
        _lowercase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
        """simple docstring"""
        _lowercase : Dict = [self.sep_token_id]
        _lowercase : Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
        """simple docstring"""
        _lowercase : Optional[int] = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase)
        return tuple(lowerCamelCase)
```

style_context (style_context_codestyle = 329):

```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ :Dict = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}


class __a ( UpperCAmelCase ):
    _a : List[str] = 'openai-gpt'
    _a : int = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
        """simple docstring"""
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = n_positions
        _UpperCAmelCase = n_embd
        _UpperCAmelCase = n_layer
        _UpperCAmelCase = n_head
        _UpperCAmelCase = afn
        _UpperCAmelCase = resid_pdrop
        _UpperCAmelCase = embd_pdrop
        _UpperCAmelCase = attn_pdrop
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = summary_type
        _UpperCAmelCase = summary_use_proj
        _UpperCAmelCase = summary_activation
        _UpperCAmelCase = summary_first_dropout
        _UpperCAmelCase = summary_proj_to_labels
        super().__init__(**_SCREAMING_SNAKE_CASE )
```

label: 0
Row 2

code (code_codestyle = 22):

```python
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)


class A_ ( lowerCAmelCase_ ):
    def __init__( self : Tuple , *snake_case_ : str , **snake_case_ : Union[str, Any] ):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , snake_case_ , )
        super().__init__(*snake_case_ , **snake_case_ )
```

style_context (style_context_codestyle = 329):

```python
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    _UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
```

label: 0
Row 3

code (code_codestyle = 23):

```python
'''simple docstring'''
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


UpperCamelCase__: Tuple = logging.getLogger(__name__)

UpperCamelCase__: str = 50  # max width of layer names
UpperCamelCase__: List[str] = 70  # max width of quantizer names


def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Any:
    UpperCAmelCase : List[str] = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=_lowerCAmelCase , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=_lowerCAmelCase , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=_lowerCAmelCase , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=_lowerCAmelCase , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=_lowerCAmelCase , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=_lowerCAmelCase , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' ,
        action='''store_true''' ,
        help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) ,
    )


def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> List[Any]:
    if args.calibrator == "max":
        UpperCAmelCase : str = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        UpperCAmelCase : Optional[int] = '''histogram'''
    elif args.calibrator == "mse":
        UpperCAmelCase : List[str] = '''histogram'''
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""" )

    UpperCAmelCase : Optional[int] = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCAmelCase )
    UpperCAmelCase : List[str] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCAmelCase )
    quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCAmelCase )


def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : int=False ) -> List[str]:
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(_lowerCAmelCase , ['''embeddings'''] , which='''weight''' , _disabled=_lowerCAmelCase )
        if args.quant_disable:
            set_quantizer_by_name(_lowerCAmelCase , [''''''] , _disabled=_lowerCAmelCase )
        if args.quant_disable_keyword:
            set_quantizer_by_name(_lowerCAmelCase , args.quant_disable_keyword , _disabled=_lowerCAmelCase )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(_lowerCAmelCase , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=_lowerCAmelCase )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(_lowerCAmelCase , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=_lowerCAmelCase )
        if args.recalibrate_weights:
            recalibrate_weights(_lowerCAmelCase )
        if args.fuse_qkv:
            fuse_qkv(_lowerCAmelCase , _lowerCAmelCase )
        if args.clip_gelu:
            clip_gelu(_lowerCAmelCase , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(_lowerCAmelCase )


def snake_case_ ( _lowerCAmelCase : str ) -> List[Any]:
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"""{name:80}: {module}""" )


def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ) -> str:
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(_lowerCAmelCase )


def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ) -> Any:
    def fusea(_lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
        for mod in [qq, qk, qv]:
            if not hasattr(_lowerCAmelCase , '''_amax''' ):
                print('''          WARNING: NO AMAX BUFFER''' )
                return
        UpperCAmelCase : Union[str, Any] = qq._amax.detach().item()
        UpperCAmelCase : Tuple = qk._amax.detach().item()
        UpperCAmelCase : Optional[int] = qv._amax.detach().item()
        UpperCAmelCase : str = max(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        qq._amax.fill_(_lowerCAmelCase )
        qk._amax.fill_(_lowerCAmelCase )
        qv._amax.fill_(_lowerCAmelCase )
        logger.info(f"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )

    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )


def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            UpperCAmelCase : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCAmelCase )
            UpperCAmelCase : Dict = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )


def snake_case_ ( _lowerCAmelCase : Dict ) -> Any:
    for name, mod in model.named_modules():
        if hasattr(_lowerCAmelCase , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            UpperCAmelCase : Any = mod.weight.shape[0]
            UpperCAmelCase : Dict = mod._weight_quantizer._amax.detach()
            UpperCAmelCase : Any = torch.ones(_lowerCAmelCase , dtype=amax.dtype , device=amax.device ) * amax
            print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )


def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
    for name, mod in model.named_modules():
        if hasattr(_lowerCAmelCase , '''_weight_quantizer''' ):
            if not hasattr(mod.weight_quantizer , '''_amax''' ):
                print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            UpperCAmelCase : str = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            UpperCAmelCase : Any = set(range(len(mod.weight.size() ) ) ) - axis_set
            UpperCAmelCase : List[str] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCAmelCase , keepdims=_lowerCAmelCase ).detach()
            logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            UpperCAmelCase : Optional[Any] = amax


def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]=25 , _lowerCAmelCase : Dict=180 , _lowerCAmelCase : str=None ) -> str:
    if ignore is None:
        UpperCAmelCase : Optional[Any] = []
    elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
        UpperCAmelCase : Any = [ignore]

    UpperCAmelCase : Tuple = 0
    for name, mod in model.named_modules():
        if not hasattr(_lowerCAmelCase , '''weight''' ):
            continue
        UpperCAmelCase : Union[str, Any] = max(_lowerCAmelCase , len(_lowerCAmelCase ) )

    for name, mod in model.named_modules():
        UpperCAmelCase : Union[str, Any] = getattr(_lowerCAmelCase , '''_input_quantizer''' , _lowerCAmelCase )
        UpperCAmelCase : Tuple = getattr(_lowerCAmelCase , '''_weight_quantizer''' , _lowerCAmelCase )
        if not hasattr(_lowerCAmelCase , '''weight''' ):
            continue
        if type(_lowerCAmelCase ) in ignore:
            continue
        if [True for s in ignore if type(_lowerCAmelCase ) is str and s in name]:
            continue
        UpperCAmelCase : str = f"""Act:{input_q.extra_repr()}"""
        UpperCAmelCase : int = f"""Wgt:{weight_q.extra_repr()}"""
        UpperCAmelCase : Tuple = f"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(_lowerCAmelCase ) <= line_width:
            logger.info(_lowerCAmelCase )
        else:
            logger.info(f"""{name:{name_width}} {act_str}""" )
            logger.info(f"""{" ":{name_width}} {wgt_str}""" )


def snake_case_ ( _lowerCAmelCase : Dict ) -> List[Any]:
    UpperCAmelCase : List[Any] = 0
    for name, mod in model.named_modules():
        if isinstance(_lowerCAmelCase , pytorch_quantization.nn.TensorQuantizer ):
            print(f"""{name:80} {mod}""" )
            count += 1
    print(f"""{count} TensorQuantizers found in model""" )


def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> Tuple:
    UpperCAmelCase : Any = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    if quantizer_mod is not None:
        assert hasattr(_lowerCAmelCase , _lowerCAmelCase )
        setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    else:
        logger.warning(f"""{name} has no {quantizer}""" )


def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any]="both" , **_lowerCAmelCase : Optional[Any] ) -> List[str]:
    UpperCAmelCase : List[Any] = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += f""" {k}={v}"""
    if which in ["input", "both"]:
        set_quantizer(_lowerCAmelCase , _lowerCAmelCase , '''_input_quantizer''' , _lowerCAmelCase , _lowerCAmelCase )
    if which in ["weight", "both"]:
        set_quantizer(_lowerCAmelCase , _lowerCAmelCase , '''_weight_quantizer''' , _lowerCAmelCase , _lowerCAmelCase )
    logger.info(_lowerCAmelCase )


def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ) -> str:
    for name, mod in model.named_modules():
        if hasattr(_lowerCAmelCase , '''_input_quantizer''' ) or hasattr(_lowerCAmelCase , '''_weight_quantizer''' ):
            for n in names:
                if re.search(_lowerCAmelCase , _lowerCAmelCase ):
                    set_quantizers(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(_lowerCAmelCase , _lowerCAmelCase ):
                    UpperCAmelCase : List[str] = f"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += f""" {k}={v}"""
                    setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
                    logger.info(_lowerCAmelCase )
```

style_context (style_context_codestyle = 329):

```python
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

lowerCAmelCase__ :Optional[int] = [
    '''python''',
    '''tqdm''',
    '''regex''',
    '''requests''',
    '''packaging''',
    '''filelock''',
    '''numpy''',
    '''tokenizers''',
    '''huggingface-hub''',
    '''safetensors''',
    '''accelerate''',
    '''pyyaml''',
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')


def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
    '''simple docstring'''
    require_version(deps[pkg] , a__ )
```

label: 0
Row 4

code (code_codestyle = 24):

```python
def lowerCamelCase__ ( snake_case_ : int = 1 , snake_case_ : int = 1000 ) -> int:
    __snake_case = 1
    __snake_case = 0
    for divide_by_number in range(snake_case_ , digit + 1 ):
        __snake_case = []
        __snake_case = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(snake_case_ ):
                    __snake_case = len(snake_case_ )
                    __snake_case = divide_by_number
            else:
                has_been_divided.append(snake_case_ )
                __snake_case = now_divide * 10 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context (style_context_codestyle = 329):

```python
from __future__ import annotations


def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
    while stack:
        _UpperCAmelCase = stack.pop()
        explored.add(a__ )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(a__ )
    return explored


lowerCAmelCase__ :Tuple = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, '''A'''))
```

label: 0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : int = { 'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': ( 'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[Any] = '''trajectory_transformer''' __UpperCamelCase : str = ['''past_key_values'''] __UpperCamelCase : Optional[Any] = { '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__(self , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2_49 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=17 , SCREAMING_SNAKE_CASE__=25 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=1_28 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0006 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=5_02_56 , **SCREAMING_SNAKE_CASE__ , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] = action_weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = reward_weight SCREAMING_SNAKE_CASE__ : List[Any] = value_weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Dict = block_size SCREAMING_SNAKE_CASE__ : Tuple = action_dim SCREAMING_SNAKE_CASE__ : List[str] = observation_dim SCREAMING_SNAKE_CASE__ : Union[str, Any] = transition_dim SCREAMING_SNAKE_CASE__ : List[str] = learning_rate SCREAMING_SNAKE_CASE__ : Dict = n_layer SCREAMING_SNAKE_CASE__ : Dict = n_head SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_embd SCREAMING_SNAKE_CASE__ : str = embd_pdrop SCREAMING_SNAKE_CASE__ : int = attn_pdrop SCREAMING_SNAKE_CASE__ : Any = resid_pdrop SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : str = layer_norm_eps SCREAMING_SNAKE_CASE__ : Optional[int] = kaiming_initializer_range SCREAMING_SNAKE_CASE__ : int = use_cache super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
25
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
329
0
Row 6

code (code_codestyle = 26):

```python
def lowerCAmelCase_ ( snake_case_ ):
    assert column_title.isupper()
    _A : Any = 0
    _A : List[str] = len(snake_case_ ) - 1
    _A : Optional[Any] = 0

    while index >= 0:
        _A : Optional[int] = (ord(column_title[index] ) - 64) * pow(26,snake_case_ )
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
```

style_context (style_context_codestyle = 329):

```python
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

lowerCAmelCase__ :int = logging.get_logger(__name__)


@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
    def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int:
        """simple docstring"""
        _UpperCAmelCase = {}
        _UpperCAmelCase = {}

        if prompt is not None:
            _UpperCAmelCase = prompt
        if generate_kwargs is not None:
            _UpperCAmelCase = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                _UpperCAmelCase = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            _UpperCAmelCase = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
        """simple docstring"""
        _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE )

        if prompt is not None:
            if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '''
                    'Note also that one single text can be provided for conditional image to text generation.' )

            _UpperCAmelCase = self.model.config.model_type

            if model_type == "git":
                _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
                _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
                _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
                _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
                model_inputs.update({'input_ids': input_ids} )
            elif model_type == "pix2struct":
                _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
                _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
                model_inputs.update(_SCREAMING_SNAKE_CASE )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            _UpperCAmelCase = None

        return model_inputs

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE )
            and all(x is None for x in model_inputs['input_ids'] )
        ):
            _UpperCAmelCase = None

        if generate_kwargs is None:
            _UpperCAmelCase = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        _UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
        _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        return model_outputs

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        _UpperCAmelCase = []
        for output_ids in model_outputs:
            _UpperCAmelCase = {
                'generated_text': self.tokenizer.decode(
                    _SCREAMING_SNAKE_CASE ,
                    skip_special_tokens=_SCREAMING_SNAKE_CASE ,
                )
            }
            records.append(_SCREAMING_SNAKE_CASE )
        return records
```

label: 0
Row 7

code (code_codestyle = 27):

```python
'''simple docstring'''
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
    __a : List[str] = list(_SCREAMING_SNAKE_CASE )
    __a : List[Any] = list(_SCREAMING_SNAKE_CASE )
    __a : Tuple = 0
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        if lista[i] != lista[i]:
            count += 1
            __a : Any = '_'
    if count > 1:
        return False
    else:
        return "".join(_SCREAMING_SNAKE_CASE )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[str] ):
    __a : Any = []
    while True:
        __a : Union[str, Any] = ['$'] * len(_SCREAMING_SNAKE_CASE )
        __a : Tuple = []
        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
                __a : List[Any] = compare_string(binary[i] , binary[j] )
                if k is False:
                    __a : Dict = '*'
                    __a : Optional[Any] = '*'
                    temp.append('X' )
        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(_SCREAMING_SNAKE_CASE ) == 0:
            return pi
        __a : Union[str, Any] = list(set(_SCREAMING_SNAKE_CASE ) )


def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Sequence[float] ):
    __a : Optional[Any] = []
    for minterm in minterms:
        __a : List[Any] = ''
        for _ in range(_SCREAMING_SNAKE_CASE ):
            __a : List[str] = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(_SCREAMING_SNAKE_CASE )
    return temp


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
    __a : List[str] = list(_SCREAMING_SNAKE_CASE )
    __a : Tuple = list(_SCREAMING_SNAKE_CASE )
    __a : Tuple = 0
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        if lista[i] != lista[i]:
            count_n += 1
    return count_n == count


def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : list[str] ):
    __a : int = []
    __a : str = [0] * len(_SCREAMING_SNAKE_CASE )
    for i in range(len(chart[0] ) ):
        __a : Any = 0
        __a : Union[str, Any] = -1
        for j in range(len(_SCREAMING_SNAKE_CASE ) ):
            if chart[j][i] == 1:
                count += 1
                __a : Optional[int] = j
        if count == 1:
            __a : Optional[Any] = 1
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(_SCREAMING_SNAKE_CASE ) ):
                        __a : List[Any] = 0
            temp.append(prime_implicants[i] )
    while True:
        __a : Any = 0
        __a : Any = -1
        __a : int = 0
        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            __a : Any = chart[i].count(1 )
            if count_n > max_n:
                __a : str = count_n
                __a : Any = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(_SCREAMING_SNAKE_CASE ) ):
                    __a : List[str] = 0


def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[str] , _SCREAMING_SNAKE_CASE : list[str] ):
    __a : int = [[0 for x in range(len(_SCREAMING_SNAKE_CASE ) )] for x in range(len(_SCREAMING_SNAKE_CASE ) )]
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        __a : Union[str, Any] = prime_implicants[i].count('_' )
        for j in range(len(_SCREAMING_SNAKE_CASE ) ):
            if is_for_table(prime_implicants[i] , binary[j] , _SCREAMING_SNAKE_CASE ):
                __a : int = 1
    return chart


def lowerCamelCase ():
    __a : Any = int(input('Enter the no. of variables\n' ) )
    __a : Union[str, Any] = [
        float(_SCREAMING_SNAKE_CASE )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
    ]
    __a : List[str] = decimal_to_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    __a : Any = check(_SCREAMING_SNAKE_CASE )
    print('Prime Implicants are:' )
    print(_SCREAMING_SNAKE_CASE )
    __a : Optional[int] = prime_implicant_chart(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    __a : List[str] = selection(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    print('Essential Prime Implicants are:' )
    print(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
```

style_context (style_context_codestyle = 329):

```python
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]:
    '''simple docstring'''
    from .. import __version__

    _UpperCAmelCase = take_from
    _UpperCAmelCase = ()
    if not isinstance(args[0] , a__ ):
        _UpperCAmelCase = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )

        _UpperCAmelCase = None
        if isinstance(a__ , a__ ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(a__ ),)
            _UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(a__ , a__ ):
            values += (getattr(a__ , a__ ),)
            _UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            _UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            _UpperCAmelCase = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , a__ , stacklevel=a__ )

    if isinstance(a__ , a__ ) and len(a__ ) > 0:
        _UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1]
        _UpperCAmelCase = call_frame.filename
        _UpperCAmelCase = call_frame.lineno
        _UpperCAmelCase = call_frame.function
        _UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )

    if len(a__ ) == 0:
        return
    elif len(a__ ) == 1:
        return values[0]
    return values
```

label: 0
Row 8

code (code_codestyle = 28):

```python
'''simple docstring'''


def __lowerCamelCase ( A__ = 50 ) -> int:
    """simple docstring"""
    UpperCamelCase = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
```

style_context (style_context_codestyle = 329):

```python
import math


lowerCAmelCase__ :Optional[int] = 1_0
lowerCAmelCase__ :Optional[Any] = 7
lowerCAmelCase__ :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS


def lowerCAmelCase__ ( a__: int = 2_0 ) -> str:
    '''simple docstring'''
    _UpperCAmelCase = math.comb(a__ , a__ )
    _UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a__ )
    _UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(2_0))
```

label: 0
Row 9

code (code_codestyle = 29):

```python
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__UpperCAmelCase = logging.get_logger(__name__)


@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
    '''simple docstring'''

    def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        self.check_model_type(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
        if padding is not None:
            UpperCAmelCase_ : List[str] = padding
        if truncation is not None:
            UpperCAmelCase_ : Tuple = truncation
        if top_k is not None:
            UpperCAmelCase_ : Dict = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
        if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
        else:
            UpperCAmelCase_ : List[str] = image
        UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
        return results

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
        UpperCAmelCase_ : Dict = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
        UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCamelCase )
        return model_inputs

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
        return model_outputs

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels

        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )

        UpperCAmelCase_ : Optional[Any] = scores.tolist()
        UpperCAmelCase_ : Tuple = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
```

style_context (style_context_codestyle = 329):

```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase__ :str = {
    '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ :Union[str, Any] = [
        '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegatronBertForCausalLM''',
        '''MegatronBertForMaskedLM''',
        '''MegatronBertForMultipleChoice''',
        '''MegatronBertForNextSentencePrediction''',
        '''MegatronBertForPreTraining''',
        '''MegatronBertForQuestionAnswering''',
        '''MegatronBertForSequenceClassification''',
        '''MegatronBertForTokenClassification''',
        '''MegatronBertModel''',
        '''MegatronBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
```

label: 0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __a = logging.get_logger(__name__) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Union[str, Any] = 'upernet' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase_ = backbone_config.get('''model_type''' ) lowercase_ = CONFIG_MAPPING[backbone_model_type] lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) lowercase_ = backbone_config lowercase_ = hidden_size lowercase_ = initializer_range lowercase_ = pool_scales lowercase_ = use_auxiliary_head lowercase_ = auxiliary_loss_weight lowercase_ = auxiliary_in_channels lowercase_ = auxiliary_channels lowercase_ = auxiliary_num_convs lowercase_ = auxiliary_concat_input lowercase_ = loss_ignore_index def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.backbone_config.to_dict() lowercase_ = self.__class__.model_type return output
30
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with an"
            " `encoder_attention_type` attribute set to one of ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
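# A hedged usage sketch (not part of the original script): the entry point
# above can also be called directly from Python. The paths below are
# hypothetical placeholders, not real checkpoints.
#
# convert_t5x_checkpoint_to_flax(
#     "/path/to/t5x/checkpoint_1000000",  # hypothetical T5X checkpoint dir
#     "google/long-t5-local-base",        # a config name resolvable by AutoConfig
#     "/path/to/flax_dump",               # hypothetical output folder
# )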
329
0
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Any: """simple docstring""" _UpperCAmelCase : List[Any] = [] for part_id in partition_order: _UpperCAmelCase : Any = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect() for row_idx, row in enumerate(_UpperCAmelCase ): expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> int: """simple docstring""" _UpperCAmelCase : str = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : Optional[Any] = spark.range(100 ).repartition(1 ) _UpperCAmelCase : Optional[int] = Spark(_UpperCAmelCase ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> Dict: """simple docstring""" _UpperCAmelCase : str = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : Optional[int] = spark.range(10 ).repartition(2 ) _UpperCAmelCase : Union[str, Any] = [1, 0] _UpperCAmelCase : Dict = _generate_iterable_examples(_UpperCAmelCase , _UpperCAmelCase ) # Reverse the partitions. _UpperCAmelCase : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCAmelCase , _UpperCAmelCase ) for i, (row_id, row_dict) in enumerate(generate_fn() ): _UpperCAmelCase , _UpperCAmelCase : Tuple = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : List[str] = spark.range(10 ).repartition(1 ) _UpperCAmelCase : List[str] = SparkExamplesIterable(_UpperCAmelCase ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(_UpperCAmelCase ): assert row_id == F"""0_{i}""" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : Any = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: _UpperCAmelCase : Optional[Any] = lambda _UpperCAmelCase : x.reverse() _UpperCAmelCase : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCAmelCase , [2, 1, 0] ) _UpperCAmelCase : Optional[Any] = SparkExamplesIterable(_UpperCAmelCase ).shuffle_data_sources(_UpperCAmelCase ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(_UpperCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : List[str] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : Optional[int] = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 _UpperCAmelCase : Dict = SparkExamplesIterable(_UpperCAmelCase ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 _UpperCAmelCase : int = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCAmelCase , [0, 2] ) for i, (row_id, row_dict) in enumerate(_UpperCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 _UpperCAmelCase : Any = SparkExamplesIterable(_UpperCAmelCase ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 _UpperCAmelCase : int = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCAmelCase , [1, 3] ) for i, (row_id, row_dict) in enumerate(_UpperCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def UpperCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() _UpperCAmelCase : str = spark.range(100 ).repartition(1 ) _UpperCAmelCase : int = Spark(_UpperCAmelCase ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
31
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
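# A hedged sketch of what `attribute_map` above buys you, assuming the class
# is transformers' CTRLConfig: the canonical `hidden_size` name resolves to
# the CTRL-specific `n_embd` attribute (needs `transformers` installed).
#
# from transformers import CTRLConfig
#
# config = CTRLConfig()
# assert config.hidden_size == config.n_embd == 1280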
329
0
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : str , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : DDPMScheduler , SCREAMING_SNAKE_CASE__ : Dict , ) -> List[Any]: super().__init__() a_ : Tuple = value_function a_ : List[str] = unet a_ : Optional[int] = scheduler a_ : str = env a_ : str = env.get_dataset() a_ : Dict = {} for key in self.data.keys(): try: a_ : Any = self.data[key].mean() except: # noqa: E722 pass a_ : List[Any] = {} for key in self.data.keys(): try: a_ : int = self.data[key].std() except: # noqa: E722 pass a_ : int = env.observation_space.shape[0] a_ : Tuple = env.action_space.shape[0] def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Any: return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: if type(SCREAMING_SNAKE_CASE__ ) is dict: return {k: self.to_torch(SCREAMING_SNAKE_CASE__ ) for k, v in x_in.items()} elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ): return x_in.to(self.unet.device ) return torch.tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device ) def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: for key, val in cond.items(): a_ : int = val.clone() return x_in def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: a_ : Optional[int] = x.shape[0] a_ : Optional[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model a_ : Optional[int] = torch.full((batch_size,) , SCREAMING_SNAKE_CASE__ , device=self.unet.device , dtype=torch.long ) for _ in range(SCREAMING_SNAKE_CASE__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models a_ : Tuple = self.value_function(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample a_ : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] a_ : Union[str, Any] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.exp(0.5 * posterior_variance ) a_ : List[Any] = model_std * grad a_ : List[str] = 0 a_ : Dict = x.detach() a_ : Any = x + scale * grad a_ : int = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Tuple = self.unet(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg a_ : List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , predict_epsilon=SCREAMING_SNAKE_CASE__ )['prev_sample'] # apply conditions to the trajectory (set the initial state) a_ : Optional[Any] = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Union[str, Any] = self.to_torch(SCREAMING_SNAKE_CASE__ ) return x, y def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=6_4 , 
SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Dict=0.1 ) -> List[Any]: # normalize the observations and create batch dimension a_ : List[str] = self.normalize(SCREAMING_SNAKE_CASE__ , 'observations' ) a_ : Dict = obs[None].repeat(SCREAMING_SNAKE_CASE__ , axis=0 ) a_ : Any = {0: self.to_torch(SCREAMING_SNAKE_CASE__ )} a_ : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) a_ : List[str] = randn_tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device ) a_ : Dict = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Union[str, Any] = self.to_torch(SCREAMING_SNAKE_CASE__ ) # run the diffusion process a_ , a_ : List[str] = self.run_diffusion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # sort output trajectories by value a_ : List[str] = y.argsort(0 , descending=SCREAMING_SNAKE_CASE__ ).squeeze() a_ : int = x[sorted_idx] a_ : Optional[int] = sorted_values[:, :, : self.action_dim] a_ : Optional[Any] = actions.detach().cpu().numpy() a_ : Dict = self.de_normalize(SCREAMING_SNAKE_CASE__ , key='actions' ) # select the action with the highest value if y is not None: a_ : str = 0 else: # if we didn't run value guiding, select a random action a_ : List[Any] = np.random.randint(0 , SCREAMING_SNAKE_CASE__ ) a_ : Tuple = denorm_actions[selected_index, 0] return denorm_actions
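# A minimal sketch of the normalize/de_normalize pair above: it is a plain
# per-key z-score transform, so one inverts the other up to floating point.
import numpy as np

x = np.array([1.0, 2.0, 3.0])
mean, std = x.mean(), x.std()
z = (x - mean) / std         # what `normalize` computes
restored = z * std + mean    # what `de_normalize` computes
assert np.allclose(restored, x)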
32
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
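# A small sketch of the aspect-preserving resize rule that
# `get_expected_values` above implements: the short side is scaled to
# `shortest_edge` and the long side follows proportionally.
shortest_edge = 18
w, h = 640, 480  # width > height, so height becomes the short side
expected_height = shortest_edge
expected_width = int(shortest_edge * w / h)
assert (expected_height, expected_width) == (18, 24)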
329
0
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __A : Tuple = { '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''' } def lowercase ( __snake_case : str = "dhaka" , __snake_case : int = 5 ): lowercase_ : Dict = min(__snake_case , 5_0 ) # Prevent abuse! lowercase_ : Optional[int] = { '''q''': query, '''tbm''': '''isch''', '''hl''': '''en''', '''ijn''': '''0''', } lowercase_ : int = requests.get('''https://www.google.com/search''' , params=__snake_case , headers=__snake_case ) lowercase_ : List[Any] = BeautifulSoup(html.text , '''html.parser''' ) lowercase_ : Tuple = ''''''.join( re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) ) lowercase_ : Union[str, Any] = json.dumps(__snake_case ) lowercase_ : Optional[int] = json.loads(__snake_case ) lowercase_ : Dict = re.findall( r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __snake_case , ) if not matched_google_image_data: return 0 lowercase_ : Optional[int] = re.sub( r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__snake_case ) , ) lowercase_ : Optional[Any] = re.findall( r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __snake_case , ) for index, fixed_full_res_image in enumerate(__snake_case ): if index >= max_images: return index lowercase_ : List[str] = bytes(__snake_case , '''ascii''' ).decode( '''unicode-escape''' ) lowercase_ : Any = bytes(__snake_case , '''ascii''' ).decode( '''unicode-escape''' ) lowercase_ : Any = urllib.request.build_opener() lowercase_ : List[str] = [ ( '''User-Agent''', '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''', ) ] urllib.request.install_opener(__snake_case ) lowercase_ : Dict = F'''query_{query.replace(' ' , '_' )}''' if not os.path.exists(__snake_case ): os.makedirs(__snake_case ) urllib.request.urlretrieve( # noqa: S310 __snake_case , F'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: __A : Union[str, Any] = download_images_from_google_query(sys.argv[1]) print(F"""{image_count} images were downloaded to disk.""") except IndexError: print('''Please provide a search term.''') raise
33
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
329
0
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
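# A hedged sketch of the guard pattern above, reduced to its shape: probe the
# optional dependency once, and fall back to a stub that raises a helpful
# error only when the feature is actually used.
try:
    import torch  # noqa: F401

    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    def pipeline_entry():
        return "real implementation"
else:
    def pipeline_entry():
        raise ImportError("This feature requires `torch`; please install it.")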
34
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
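# A toy sketch of what `group_texts` above does: concatenate every tokenized
# example, drop the remainder, and re-split into fixed-length blocks.
max_length = 4
examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
concatenated = sum(examples["input_ids"], [])
total_length = (len(concatenated) // max_length) * max_length
blocks = [concatenated[i : i + max_length] for i in range(0, total_length, max_length)]
assert blocks == [[1, 2, 3, 4], [5, 6, 7, 8]]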
329
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
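# A hedged sketch of the lazy-import shape used above, written with the
# module-level __getattr__ hook from PEP 562 (analogous to, but not the same
# mechanism as, transformers' _LazyModule).
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from math import sqrt  # noqa: F401  # real import, seen only by type checkers
else:
    import importlib

    def __getattr__(name):
        # resolve attributes lazily, on first access
        return getattr(importlib.import_module("math"), name)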
35
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
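# A usage sketch mirroring `unwrap_schedule` above with a real optimizer and
# scheduler pair (assumes torch and transformers are installed).
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    scheduler.step()
assert lrs[0] == 0.0 and lrs[2] == 10.0  # warmup climbs to the base lr, then decays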
329
0
from math import pow


def backtrack(
    needed_sum,
    power,
    current_number,
    current_sum,
    solutions_count,
):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
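# A usage sketch for `solve` above: 100 has exactly three decompositions into
# sums of unique squares (10**2; 6**2 + 8**2; 1**2 + 3**2 + 4**2 + 5**2 + 7**2).
assert solve(100, 2) == 3
assert solve(13, 2) == 1  # only 2**2 + 3**2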
36
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
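# A reduced sketch of the key renaming done by `replace_key_with_offset`
# above: the patch-embedding offset shifts the original flat block index, and
# `mlp.fc1` maps onto `output.conv1` (the sample key is made up for illustration).
orig_block_num, layer_num, offset = 3, 0, 1
new_block_num = orig_block_num - offset
key = f"network.{orig_block_num}.{layer_num}.mlp.fc1.weight"
key = key.replace(f"{orig_block_num}.{layer_num}.mlp.fc1", f"block.{new_block_num}.{layer_num}.output.conv1")
assert key == "network.block.2.0.output.conv1.weight"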
329
0
'''simple docstring'''


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("""The given input must be positive""" )

    # get the generated string sequence
    lowerCAmelCase__ : List[str] = gray_code_sequence_string(UpperCamelCase )
    #
    # convert them to integers
    for i in range(len(UpperCamelCase ) ):
        lowerCAmelCase__ : int = int(sequence[i] , 2 )

    return sequence


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    lowerCAmelCase__ : Optional[Any] = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    lowerCAmelCase__ : Union[str, Any] = gray_code_sequence_string(bit_count - 1 )

    lowerCAmelCase__ : List[Any] = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        lowerCAmelCase__ : Optional[Any] = """0""" + smaller_sequence[i]
        sequence.append(UpperCamelCase )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        lowerCAmelCase__ : int = """1""" + smaller_sequence[i]
        sequence.append(UpperCamelCase )

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
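A quick sanity check for the two functions above, added here for illustration; the readable names gray_code and gray_code_sequence_string are assumed deobfuscations of the anonymized identifiers.

# Expected behaviour, assuming the deobfuscated names:
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(2) == [0, 1, 3, 2]  # consecutive values differ in exactly one bit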
37
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
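The anonymized classes above mirror transformers.pipelines.pt_utils (PipelineDataset, PipelineIterator, PipelineChunkIterator, PipelinePackIterator, KeyDataset and KeyPairDataset). A minimal sketch of the batch-unrolling idea, assuming those upstream names:

import torch
from torch.utils.data import DataLoader
from transformers.pipelines.pt_utils import PipelineDataset, PipelineIterator

# process() maps each raw item to a model-ready dict; the DataLoader then collates pairs.
dataset = PipelineDataset([1, 2, 3, 4], process=lambda x, **kw: {"value": torch.tensor(x)}, params={})
loader = DataLoader(dataset, batch_size=2)

# infer() receives a whole collated batch, but PipelineIterator unrolls the result
# back into batch_size=1 items, so downstream consumers never see the real batch size.
it = PipelineIterator(loader, infer=lambda batch, **kw: batch, params={}, loader_batch_size=2)
for item in it:
    print(item["value"].shape)  # torch.Size([1]) for each of the four elements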
329
0
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @slow
    def _A ( self : Any ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(__lowerCamelCase ):
                UpperCamelCase :List[str] = AutoConfig.from_pretrained(__lowerCamelCase )
                self.assertIsNotNone(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

                UpperCamelCase :Optional[int] = FlaxAutoModel.from_pretrained(__lowerCamelCase )
                self.assertIsNotNone(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

    @slow
    def _A ( self : Optional[int] ):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(__lowerCamelCase ):
                UpperCamelCase :Dict = AutoConfig.from_pretrained(__lowerCamelCase )
                self.assertIsNotNone(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

                UpperCamelCase :Union[str, Any] = FlaxAutoModel.from_pretrained(__lowerCamelCase )
                self.assertIsNotNone(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

    @slow
    def _A ( self : int ):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            UpperCamelCase :str = AutoTokenizer.from_pretrained(__lowerCamelCase )
            UpperCamelCase :Union[str, Any] = FlaxBertModel.from_pretrained(__lowerCamelCase )
            UpperCamelCase :List[Any] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**__lowerCamelCase : int ):
                return model(**__lowerCamelCase )

            eval(**__lowerCamelCase ).block_until_ready()

    @slow
    def _A ( self : Union[str, Any] ):
        for model_name in ["roberta-base", "roberta-large"]:
            UpperCamelCase :Tuple = AutoTokenizer.from_pretrained(__lowerCamelCase )
            UpperCamelCase :Any = FlaxRobertaModel.from_pretrained(__lowerCamelCase )
            UpperCamelCase :List[str] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**__lowerCamelCase : Any ):
                return model(**__lowerCamelCase )

            eval(**__lowerCamelCase ).block_until_ready()

    def _A ( self : Any ):
        with self.assertRaisesRegex(
            __lowerCamelCase , """bert-base is not a local folder and is not a valid model identifier"""
        ):
            UpperCamelCase :Dict = FlaxAutoModel.from_pretrained("""bert-base""" )

    def _A ( self : Union[str, Any] ):
        with self.assertRaisesRegex(
            __lowerCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"""
        ):
            UpperCamelCase :Optional[int] = FlaxAutoModel.from_pretrained(__lowerCamelCase , revision="""aaaaaa""" )

    def _A ( self : Any ):
        with self.assertRaisesRegex(
            __lowerCamelCase ,
            """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" ,
        ):
            UpperCamelCase :int = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )

    def _A ( self : List[Any] ):
        with self.assertRaisesRegex(__lowerCamelCase , """Use `from_pt=True` to load this model""" ):
            UpperCamelCase :str = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
38
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ :int = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[Any] = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}


class __a ( UpperCAmelCase ):
    _a : str = 'data2vec-text'

    def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = intermediate_size
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = max_position_embeddings
        _UpperCAmelCase = type_vocab_size
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps
        _UpperCAmelCase = position_embedding_type
        _UpperCAmelCase = use_cache
        _UpperCAmelCase = classifier_dropout


class __a ( UpperCAmelCase ):
    @property
    def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _UpperCAmelCase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
329
0
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]:
    """simple docstring"""
    _UpperCAmelCase = (boundary[1] - boundary[0]) / steps
    _UpperCAmelCase = boundary[0]
    _UpperCAmelCase = boundary[1]
    _UpperCAmelCase = make_points(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    _UpperCAmelCase = 0.0
    y += (h / 2.0) * f(__lowerCAmelCase )
    for i in x_i:
        # print(i)
        y += h * f(__lowerCAmelCase )
    y += (h / 2.0) * f(__lowerCAmelCase )
    return y


def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
    """simple docstring"""
    _UpperCAmelCase = a + h
    while x < (b - h):
        yield x
        _UpperCAmelCase = x + h


def __A ( __lowerCAmelCase )-> Optional[Any]:  # enter your function here
    """simple docstring"""
    _UpperCAmelCase = (x - 0) * (x - 0)
    return y


def __A ( )-> int:
    """simple docstring"""
    _UpperCAmelCase = 0.0  # Lower bound of integration
    _UpperCAmelCase = 1.0  # Upper bound of integration
    _UpperCAmelCase = 10.0  # define number of steps or resolution
    _UpperCAmelCase = [a, b]  # define boundary of integration
    _UpperCAmelCase = method_a(__lowerCAmelCase , __lowerCAmelCase )
    print(F"""y = {y}""" )


if __name__ == "__main__":
    main()
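A worked check of the composite trapezoid rule implemented above; the restated helper below uses readable names purely for illustration and is not part of the sample.

def trapezoid(f, a, b, steps):
    # same rule as the first function above: h * (f(a)/2 + interior points + f(b)/2)
    h = (b - a) / steps
    return h * (f(a) / 2 + sum(f(a + i * h) for i in range(1, int(steps))) + f(b) / 2)

# For f(x) = x**2 on [0, 1] with 10 steps the rule gives 0.335, versus the exact
# integral 1/3: the expected O(h^2) overestimate for a convex integrand.
assert abs(trapezoid(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-9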
39
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
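A hypothetical way to run the suite above in a transformers checkout; the path below is where the upstream version of this test file lives, so adjust it if the layout differs.

# pytest is the runner transformers uses; -k narrows the run to the backbone tests.
#   python -m pytest tests/models/maskformer/test_modeling_maskformer_swin.py -k backbone -v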
329
0
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == 
"ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
from collections.abc import Generator


def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = 0, 1
    while True:
        _UpperCAmelCase , _UpperCAmelCase = b, a + b
        yield b


def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    _UpperCAmelCase = 1
    _UpperCAmelCase = fibonacci_generator()
    while len(str(next(a__ ) ) ) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
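A quick check of the Project Euler 25 logic above, using solution, the name the __main__ guard calls: counting F(1) = F(2) = 1, the first Fibonacci number with three digits is F(12) = 144.

assert solution(3) == 12  # 144 is the twelfth Fibonacci number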
329
0
'''simple docstring'''


def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
    if not isinstance(UpperCamelCase , UpperCamelCase ):
        lowerCamelCase__ : int = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(UpperCamelCase )
    if number < 0:
        return False
    lowerCamelCase__ : Tuple = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
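A note on what the digit-by-digit loop above actually computes, added for illustration: it returns True exactly when number**2 ends with the digits of number itself, i.e. when number is an automorphic number.

# Expected behaviour of the check above:
#   5  -> 5**2  = 25,   which ends in 5   -> True
#   76 -> 76**2 = 5776, which ends in 76  -> True
#   7  -> 7**2  = 49,   which ends in 9   -> False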
41
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
329
0
'''simple docstring'''


def SCREAMING_SNAKE_CASE__ ( __A = 1_000_000 ) -> int:
    _snake_case = [i - 1 for i in range(limit + 1 )]

    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , __A ):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1] )


if __name__ == "__main__":
    print(solution())
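A small worked example for the totient sieve above, using solution as called in the __main__ guard: phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31, the number of reduced proper fractions with denominator at most 10 (Project Euler 72).

assert solution(10) == 31  # sum of Euler's totient over 2..10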
42
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
329
0
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

__lowercase = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

__lowercase = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'{len(upper_files)} files contain uppercase characters:')
    print('''\n'''.join(upper_files) + '''\n''')

__lowercase = [file for file in filepaths if ''' ''' in file]
if space_files:
    print(F'{len(space_files)} files contain space characters:')
    print('''\n'''.join(space_files) + '''\n''')

__lowercase = [file for file in filepaths if '''-''' in file]
if hyphen_files:
    print(F'{len(hyphen_files)} files contain hyphen characters:')
    print('''\n'''.join(hyphen_files) + '''\n''')

__lowercase = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'{len(nodir_files)} files are not in a directory:')
    print('''\n'''.join(nodir_files) + '''\n''')

__lowercase = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
43
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ :Dict = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}


class __a ( UpperCAmelCase ):
    _a : List[str] = 'openai-gpt'
    _a : int = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
        """simple docstring"""
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = n_positions
        _UpperCAmelCase = n_embd
        _UpperCAmelCase = n_layer
        _UpperCAmelCase = n_head
        _UpperCAmelCase = afn
        _UpperCAmelCase = resid_pdrop
        _UpperCAmelCase = embd_pdrop
        _UpperCAmelCase = attn_pdrop
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = summary_type
        _UpperCAmelCase = summary_use_proj
        _UpperCAmelCase = summary_activation
        _UpperCAmelCase = summary_first_dropout
        _UpperCAmelCase = summary_proj_to_labels
        super().__init__(**_SCREAMING_SNAKE_CASE )
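A minimal usage sketch, assuming the anonymized class above corresponds to transformers.OpenAIGPTConfig, whose model_type string 'openai-gpt' it declares:

from transformers import OpenAIGPTConfig

# The attribute_map defined above aliases the HF-generic names onto the GPT-1 ones.
config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)  # a smaller-than-default variant
assert config.num_hidden_layers == 6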
329
0
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _a : List[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
44
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
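For readability, here is the concrete URL the assertion above expects for one parameter combination (illustrative values only); note that quote() percent-encodes the blanks in the filename.

from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
print(f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}")
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv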
329
0
"""simple docstring""" import torch from torch import nn class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , _a , _a , _a , _a , _a=1 , _a=False ): super().__init__() __a = n_token __a = d_embed __a = d_proj __a = cutoffs + [n_token] __a = [0] + self.cutoffs __a = div_val __a = self.cutoffs[0] __a = len(self.cutoffs ) - 1 __a = self.shortlist_size + self.n_clusters if self.n_clusters > 0: __a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) __a = nn.Parameter(torch.zeros(self.n_clusters ) ) __a = nn.ModuleList() __a = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) ) else: self.out_projs.append(_a ) self.out_layers.append(nn.Linear(_a , _a ) ) else: for i in range(len(self.cutoffs ) ): __a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) ) self.out_layers.append(nn.Linear(_a , r_idx - l_idx ) ) __a = keep_order def __UpperCAmelCase ( self , _a , _a , _a , _a ): if proj is None: __a = nn.functional.linear(_a , _a , bias=_a ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: __a = nn.functional.linear(_a , proj.t().contiguous() ) __a = nn.functional.linear(_a , _a , bias=_a ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def __UpperCAmelCase ( self , _a , _a=None , _a=False ): if labels is not None: # Shift so that tokens < n predict n __a = hidden[..., :-1, :].contiguous() __a = labels[..., 1:].contiguous() __a = hidden.view(-1 , hidden.size(-1 ) ) __a = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: __a = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: __a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: __a = labels != -100 __a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device ) __a = ( -nn.functional.log_softmax(_a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: __a = nn.functional.log_softmax(_a , dim=-1 ) else: # construct weights and biases __a , __a = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a = self.out_layers[0].weight[l_idx:r_idx] __a = self.out_layers[0].bias[l_idx:r_idx] else: __a = self.out_layers[i].weight __a = self.out_layers[i].bias if i == 0: __a = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __a = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(_a ) biases.append(_a ) __a , __a , __a = weights[0], biases[0], self.out_projs[0] __a = self._compute_logit(_a , _a , _a , _a ) __a = nn.functional.log_softmax(_a , dim=1 ) if labels is None: __a = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: __a = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device ) __a = 0 __a = [0] + self.cutoffs for i in range(len(_a ) - 1 ): __a , __a = cutoff_values[i], cutoff_values[i + 1] if labels is not None: __a = (labels >= l_idx) & (labels < r_idx) __a = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue __a = labels.index_select(0 , _a ) - l_idx __a = head_logprob.index_select(0 , _a ) __a = hidden.index_select(0 , _a ) else: __a = hidden if i == 0: if labels is not None: __a = 
head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: __a = head_logprob[:, : self.cutoffs[0]] else: __a , __a , __a = weights[i], biases[i], self.out_projs[i] __a = self._compute_logit(_a , _a , _a , _a ) __a = nn.functional.log_softmax(_a , dim=1 ) __a = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: __a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: __a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i __a = logprob_i if labels is not None: if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 , _a , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def __UpperCAmelCase ( self , _a ): if self.n_clusters == 0: __a = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(_a , dim=-1 ) else: # construct weights and biases __a , __a = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __a , __a = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a = self.out_layers[0].weight[l_idx:r_idx] __a = self.out_layers[0].bias[l_idx:r_idx] else: __a = self.out_layers[i].weight __a = self.out_layers[i].bias if i == 0: __a = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __a = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(_a ) biases.append(_a ) __a , __a , __a = weights[0], biases[0], self.out_projs[0] __a = self._compute_logit(_a , _a , _a , _a ) __a = hidden.new_empty((head_logit.size(0 ), self.n_token) ) __a = nn.functional.log_softmax(_a , dim=1 ) __a = [0] + self.cutoffs for i in range(len(_a ) - 1 ): __a , __a = cutoff_values[i], cutoff_values[i + 1] if i == 0: __a = head_logprob[:, : self.cutoffs[0]] else: __a , __a , __a = weights[i], biases[i], self.out_projs[i] __a = self._compute_logit(_a , _a , _a , _a ) __a = nn.functional.log_softmax(_a , dim=1 ) __a = head_logprob[:, -i] + tail_logprob_i __a = logprob_i return out
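As a point of comparison for the adaptive softmax above (an aside, not part of the original file), PyTorch ships the same cutoff idea as torch.nn.AdaptiveLogSoftmaxWithLoss, which is handy for cross-checking shapes and loss values on toy inputs.

import torch

adaptive = torch.nn.AdaptiveLogSoftmaxWithLoss(
    in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0
)
hidden = torch.randn(8, 64)              # a batch of 8 hidden states
labels = torch.randint(0, 1000, (8,))    # target token ids
output, loss = adaptive(hidden, labels)  # per-example target log-probs and mean NLL
print(output.shape, loss.item())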
45
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
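A standalone usage sketch of the helper imported above (hypothetical requirement and hint strings, not part of the original file): require_version checks an installed distribution against a PEP 440 specifier and raises with the hint appended when the constraint fails, while dep_version_check simply looks the pin up in the deps table first.

from transformers.utils.versions import require_version

require_version("numpy>=1.17", "numpy>=1.17 is needed for array handling")  # silent when satisfied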
329
0
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.12.2"): raise Exception("requires fairseq >= 0.12.2") if version.parse(fairseq.__version__) > version.parse("2"): raise Exception("requires fairseq < v2") logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = "Hello, World!" SCREAMING_SNAKE_CASE__ = "en_XX" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ): '''simple docstring''' lowerCAmelCase = Path("""data_bin""" ) lowerCAmelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , ) xmod.eval() # disable dropout print(SCREAMING_SNAKE_CASE ) lowerCAmelCase = xmod.model.encoder.sentence_encoder lowerCAmelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our X-MOD config:""" , SCREAMING_SNAKE_CASE ) lowerCAmelCase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. # Embeddings lowerCAmelCase = xmod_sent_encoder.embed_tokens.weight lowerCAmelCase = xmod_sent_encoder.embed_positions.weight lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. 
lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.weight lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowerCAmelCase = model.roberta.encoder.layer[i] lowerCAmelCase = xmod_sent_encoder.layers[i] # self attention lowerCAmelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("""Dimensions of self-attention weights do not match.""" ) lowerCAmelCase = xmod_layer.self_attn.q_proj.weight lowerCAmelCase = xmod_layer.self_attn.q_proj.bias lowerCAmelCase = xmod_layer.self_attn.k_proj.weight lowerCAmelCase = xmod_layer.self_attn.k_proj.bias lowerCAmelCase = xmod_layer.self_attn.v_proj.weight lowerCAmelCase = xmod_layer.self_attn.v_proj.bias # self-attention output lowerCAmelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("""Dimensions of self-attention output weights do not match.""" ) lowerCAmelCase = xmod_layer.self_attn.out_proj.weight lowerCAmelCase = xmod_layer.self_attn.out_proj.bias lowerCAmelCase = xmod_layer.self_attn_layer_norm.weight lowerCAmelCase = xmod_layer.self_attn_layer_norm.bias # intermediate lowerCAmelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of intermediate weights do not match.""" ) lowerCAmelCase = xmod_layer.fca.weight lowerCAmelCase = xmod_layer.fca.bias # output lowerCAmelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("""Dimensions of feed-forward weights do not match.""" ) lowerCAmelCase = xmod_layer.fca.weight lowerCAmelCase = xmod_layer.fca.bias lowerCAmelCase = xmod_layer.final_layer_norm.weight lowerCAmelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: lowerCAmelCase = xmod_layer.adapter_layer_norm.weight lowerCAmelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("""Lists of language adapters do not match.""" ) for lang_code, adapter in xmod_layer.adapter_modules.items(): lowerCAmelCase = bert_output.adapter_modules[lang_code] lowerCAmelCase = xmod_layer.adapter_modules[lang_code] lowerCAmelCase = from_adapter.fca.weight lowerCAmelCase = from_adapter.fca.bias lowerCAmelCase = from_adapter.fca.weight lowerCAmelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: lowerCAmelCase = xmod_sent_encoder.layer_norm.weight lowerCAmelCase = xmod_sent_encoder.layer_norm.bias if classification_head: lowerCAmelCase = xmod.model.classification_heads["""mnli"""].dense.weight lowerCAmelCase = xmod.model.classification_heads["""mnli"""].dense.bias lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head lowerCAmelCase = xmod.model.encoder.lm_head.dense.weight lowerCAmelCase = xmod.model.encoder.lm_head.dense.bias lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias lowerCAmelCase = xmod.model.encoder.lm_head.weight lowerCAmelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowerCAmelCase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(SCREAMING_SNAKE_CASE ) lowerCAmelCase = model(SCREAMING_SNAKE_CASE )[0] if classification_head: lowerCAmelCase = xmod.model.classification_heads["""mnli"""](xmod.extract_features(SCREAMING_SNAKE_CASE ) ) else: lowerCAmelCase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 lowerCAmelCase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
46
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
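For contrast with the two "Differences from BFS" comments above, here is a minimal breadth-first counterpart (a sketch, not part of the original file): pop from the front of a queue and mark nodes when they are enqueued rather than when they are visited.

from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # 1) pop the first element instead of the last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # 2) mark on enqueue so a node is queued at most once
                queue.append(adj)
    return explored


print(breadth_first_search(G, "A"))  # visits the same reachable set as depth_first_search(G, "A")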
329
0
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation

import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
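A minimal usage sketch (assumed single-process setup, not part of the original file): in practice this wrapper is created for you when a scheduler is passed through Accelerator.prepare(), which also wraps the optimizer so the step_was_skipped check above works.

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
lr_scheduler.step()  # now an AcceleratedScheduler: steps in lock-step with the optimizer
print(lr_scheduler.get_last_lr())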
47
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
329
0
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
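A few quick checks of the predicate above (illustrative only, not part of the original file):

assert is_balanced("([]{})")    # properly nested
assert not is_balanced("([)]")  # crossed pair
assert not is_balanced("(((")   # unmatched openers left on the stack
assert is_balanced("")          # vacuously balanced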
48
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
329
0
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
49
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
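A usage sketch (hypothetical function and argument names, not part of the original file): a caller that renamed old_size to new_size pops the legacy kwarg through deprecate(), which warns now and fails loudly once the library version reaches the stated deadline.

def resize(new_size=None, **kwargs):
    old_size = deprecate("old_size", "1.0.0", "Please use `new_size` instead.", take_from=kwargs)
    if old_size is not None:
        new_size = old_size  # honor the deprecated spelling for now
    return new_size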
329
0
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
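A usage sketch (hypothetical file path, not part of the original file): this reader is what backs the packaged "text" loader, which yields one example per line of the input file.

from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(dataset[0])  # {'text': '<first line of my_corpus.txt>'}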
50
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Expected number of distinct colours when `num_picked` balls are drawn from an urn
    holding BALLS_PER_COLOUR balls of each of NUM_COLOURS colours. By linearity of
    expectation this is NUM_COLOURS * P(a given colour appears at least once).
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
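A quick Monte Carlo cross-check of the closed form above (illustrative, not part of the original solution): draw num_picked balls without replacement many times and average the number of distinct colours; the mean should approach float(solution(20)).

import random


def simulate(trials: int = 100_000, num_picked: int = 20) -> float:
    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total_colours = 0
    for _ in range(trials):
        total_colours += len(set(random.sample(urn, num_picked)))  # sample() draws without replacement
    return total_colours / trials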
329
0
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case_ : Optional[int] = logging.get_logger(__name__) def A (__A : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) UpperCAmelCase_ = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , __A ) if matches: UpperCAmelCase_ = float(matches[1] ) UpperCAmelCase_ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". UpperCAmelCase_ = 1001 UpperCAmelCase_ = '''imagenet-1k-id2label.json''' UpperCAmelCase_ = '''huggingface/label-files''' UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(__A ) + 1: v for k, v in idalabel.items()} UpperCAmelCase_ = '''background''' UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} return config def A () -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw ) return im @torch.no_grad() def A (__A : Optional[Any] , __A : List[Any] , __A : Any , __A : Union[str, Any]=False ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ = get_mobilenet_va_config(__A ) # Load 🤗 model UpperCAmelCase_ = MobileNetVaForImageClassification(__A ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__A , __A , __A ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor UpperCAmelCase_ = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , ) UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": UpperCAmelCase_ = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": UpperCAmelCase_ = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: UpperCAmelCase_ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __A , atol=1E-4 ) Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__A ) if push_to_hub: print('''Pushing to the hub...''' ) UpperCAmelCase_ = '''google/''' + model_name image_processor.push_to_hub(__A ) model.push_to_hub(__A ) if __name__ == "__main__": snake_case_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." 
) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) snake_case_ : Optional[Any] = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
51
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
0
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
52
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
) if __name__ == "__main__": lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase__ :List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
329
0
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model built on a BERT encoder."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
53
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
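A short usage sketch (hypothetical local path, not part of the original file): the config round-trips through the standard PretrainedConfig serialization helpers.

from transformers import CTRLConfig

config = CTRLConfig(n_layer=2)         # tiny variant, for illustration only
config.save_pretrained("./ctrl-tiny")  # writes config.json to a hypothetical local folder
reloaded = CTRLConfig.from_pretrained("./ctrl-tiny")
assert reloaded.n_layer == 2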
329
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, plus its indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
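
# A hedged sketch (not in the original script) showing the slicing behavior of
# `_find_text_in_file` above on a throwaway file; the file contents and prompt
# strings here are hypothetical, chosen only to make the indices easy to follow.
import tempfile

with tempfile.NamedTemporaryFile("w+", suffix=".md", delete=False) as tmp:
    tmp.write("intro\n<!--start-->\n\nmodel list line\n\n<!--end-->\noutro\n")
    tmp_path = tmp.name

text, start, end, all_lines = _find_text_in_file(tmp_path, "<!--start-->", "<!--end-->")
assert text == "model list line\n"  # blank lines around the block are trimmed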
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
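
# A hedged sketch (not part of the tests above): the minimal COCO-detection payload
# the slow test feeds to DeformableDetrImageProcessor. The box values are made up;
# real annotations use [x, y, width, height] in pixels.
from PIL import Image

from transformers import DeformableDetrImageProcessor

image = Image.new("RGB", (640, 480))  # blank stand-in image
target = {
    "image_id": 1,
    "annotations": [
        {"id": 1, "image_id": 1, "category_id": 17, "bbox": [10, 20, 30, 40], "area": 1200, "iscrowd": 0}
    ],
}
processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["labels"][0]["boxes"])  # boxes come back normalized as (cx, cy, w, h)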
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
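
# Quick worked check (added for illustration): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) should return 27.
assert solution(10) == 27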
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
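
# A hedged sketch of the tokenizer call the tests above exercise (it downloads the
# 'openai/jukebox-1b-lyrics' tokenizer files; the artist/genres/lyrics strings here
# are illustrative, not the full test metadata).
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller")["input_ids"]
print([ids.shape for ids in tokens])  # one tensor of token ids per prior level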
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
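
# A hedged sketch (not part of the original conftest): how the IGNORE_RESULT flag
# registered above is meant to be used inside a docstring. With the directive set,
# CustomOutputChecker accepts any output for that line.
def _noisy_example():
    """
    >>> _noisy_example()  # doctest: +IGNORE_RESULT
    'whatever was printed on the last run'
    """
    return 42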
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
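
# A hedged sketch of reading one of the shards written above back with tf.data.
# The feature spec mirrors get_serialized_examples(); the shard path is hypothetical.
import tensorflow as tf

feature_description = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}


def decode_fn(example):
    return tf.io.parse_single_example(example, feature_description)


raw_dataset = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])  # hypothetical path
for record in raw_dataset.map(decode_fn).take(1):
    print(tf.sparse.to_dense(record["input_ids"]).shape)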
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data):
    # Split the sklearn dataset bunch into features and targets
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    # Fit an XGBoost classifier on the training data
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset, train an XGBoost classifier and plot its confusion matrix
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
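
# A hedged follow-up sketch: XGBClassifier implements the scikit-learn API, so the
# model returned by xgboost() above can be scored directly. The split is random,
# so the exact accuracy varies from run to run.
iris = load_iris()
features, targets = data_handling(iris)
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
clf = xgboost(x_train, y_train)
print(f"test accuracy: {clf.score(x_test, y_test):.3f}")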
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
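
# A hedged sketch of the optimizer/scheduler pairing these tests exercise: AdamW
# with a linear warmup-then-decay schedule, stepped once per training step. A real
# loop would compute a loss and call backward() before optimizer.step().
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)
optimizer = AdamW(model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
for _ in range(10):
    optimizer.step()  # update weights first
    scheduler.step()  # then advance the learning-rate schedule
print(scheduler.get_last_lr())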
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class a_ ( unittest.TestCase ): '''simple docstring''' def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _SCREAMING_SNAKE_CASE = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(A ) , A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(A ) , x.transpose() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) ) @require_tf def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) ) @require_flax def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(transpose(A ) , np.asarray(transpose(A ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , np.asarray(transpose(A , axes=(1, 2, 0) ) ) ) ) def snake_case_( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.reshape(A , (4, 3) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(A , (12, 5) ) , np.reshape(A , (12, 5) ) ) ) @require_torch def 
snake_case_( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(reshape(A , (12, 5) ) , reshape(A , (12, 5) ).numpy() ) ) @require_tf def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(reshape(A , (12, 5) ) , reshape(A , (12, 5) ).numpy() ) ) @require_flax def snake_case_( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.asarray(reshape(A , (4, 3) ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(reshape(A , (12, 5) ) , np.asarray(reshape(A , (12, 5) ) ) ) ) def snake_case_( self ) -> Any: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(A ) , np.squeeze(A ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.squeeze(A , axis=2 ) ) ) @require_torch def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) ) @require_tf def snake_case_( self ) -> List[str]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) ) @require_flax def snake_case_( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(squeeze(A ) , np.asarray(squeeze(A ) ) ) ) _SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.asarray(squeeze(A , axis=2 ) ) ) ) def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.expand_dims(A , axis=1 ) ) ) @require_torch def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = torch.tensor(A ) self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) ) @require_tf def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = tf.constant(A ) self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) ) @require_flax def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 ) _SCREAMING_SNAKE_CASE = jnp.array(A ) self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.asarray(expand_dims(A , axis=1 ) ) ) )
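
# A hedged sketch of the helpers under test above: the same `flatten_dict` and
# `transpose` calls work on plain NumPy arrays (and, per the tests, on torch/tf/jax
# tensors as well).
import numpy as np

from transformers.utils import flatten_dict, transpose

assert flatten_dict({"a": {"b": 1}}) == {"a.b": 1}
x = np.random.randn(3, 4, 5)
assert transpose(x, axes=(1, 2, 0)).shape == (4, 5, 3)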
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `original_name` inside `key` while shifting its block index by `offset`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
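
# A hedged sketch of driving the conversion above programmatically rather than via
# argparse; the checkpoint path is hypothetical and must point at a real .pth file.
convert_poolformer_checkpoint(
    model_name="poolformer_s12",
    checkpoint_path="/path/to/poolformer_s12.pth",  # hypothetical local checkpoint
    pytorch_dump_folder_path="./poolformer_s12",
)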
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
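
# A hedged usage sketch for the processor class above (the checkpoint names are
# placeholders; any AutoImageProcessor/AutoTokenizer pair would do).
from transformers import AutoImageProcessor, AutoTokenizer

image_processor = AutoImageProcessor.from_pretrained("some/vision-checkpoint")  # hypothetical
tokenizer = AutoTokenizer.from_pretrained("some/text-checkpoint")  # hypothetical
processor = ImageTextProcessor(image_processor=image_processor, tokenizer=tokenizer)
# inputs = processor(text="a photo of a cat", images=pil_image, return_tensors="pt")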
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


# NOTE: identifier names were lost during extraction; they are restored below to
# match transformers' pipelines/pt_utils.py.
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
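# A minimal sketch of how KeyDataset is typically used: stream one column of a
# datasets.Dataset through a pipeline without materializing it first. The
# dataset and task names below are illustrative assumptions.
#
# from datasets import load_dataset
# from transformers import pipeline
# from transformers.pipelines.pt_utils import KeyDataset
#
# ds = load_dataset("imdb", split="test")
# classifier = pipeline("sentiment-analysis")
# for out in classifier(KeyDataset(ds, "text"), batch_size=8):
#     print(out)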
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging snake_case__ : Optional[int] = logging.get_logger(__name__) snake_case__ : List[Any] = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class snake_case_( a__ ): __UpperCamelCase = '''trajectory_transformer''' __UpperCamelCase = ['''past_key_values'''] __UpperCamelCase = { '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=1_0_0 , UpperCamelCase_ : int=5 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : str=1 , UpperCamelCase_ : int=2_4_9 , UpperCamelCase_ : int=6 , UpperCamelCase_ : Tuple=1_7 , UpperCamelCase_ : Optional[Any]=2_5 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Any=1_2_8 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=0.0_006 , UpperCamelCase_ : List[Any]=5_1_2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : str=1E-12 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : str=5_0_2_5_6 , UpperCamelCase_ : Dict=5_0_2_5_6 , **UpperCamelCase_ : List[str] , ): lowerCAmelCase : Any = vocab_size lowerCAmelCase : List[Any] = action_weight lowerCAmelCase : int = reward_weight lowerCAmelCase : Optional[int] = value_weight lowerCAmelCase : Any = max_position_embeddings lowerCAmelCase : Dict = block_size lowerCAmelCase : Any = action_dim lowerCAmelCase : int = observation_dim lowerCAmelCase : int = transition_dim lowerCAmelCase : Tuple = learning_rate lowerCAmelCase : List[str] = n_layer lowerCAmelCase : Union[str, Any] = n_head lowerCAmelCase : str = n_embd lowerCAmelCase : Any = embd_pdrop lowerCAmelCase : Optional[int] = attn_pdrop lowerCAmelCase : Tuple = resid_pdrop lowerCAmelCase : int = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : List[str] = kaiming_initializer_range lowerCAmelCase : str = use_cache super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
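# A short usage sketch for the configuration class (the directory path is an
# illustrative assumption):
#
# config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12)
# config.save_pretrained("./data2vec-text-config")     # writes config.json
# reloaded = Data2VecTextConfig.from_pretrained("./data2vec-text-config")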
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _a = False class A_ (unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class A_ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) UpperCAmelCase_ : List[str] = torch.manual_seed(0 ) UpperCAmelCase_ : int = pipe.dual_guided( prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Optional[int] = generator.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.dual_guided( prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = "cyberpunk 2077" UpperCAmelCase_ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) UpperCAmelCase_ : List[Any] = torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = pipe.dual_guided( prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images UpperCAmelCase_ : Union[str, Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Any = "A painting of a squirrel eating a burger " UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = pipe.text_to_image( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images UpperCAmelCase_ : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ : int = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Optional[Any] = pipe.image_variation(lowercase_ , generator=lowercase_ , 
output_type="numpy" ).images UpperCAmelCase_ : Tuple = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


# NOTE: identifier and test-method names were lost during extraction; they are
# best-effort reconstructions of the upstream MaskFormerSwin test suite.
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
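# A minimal sketch of driving the backbone outside the test harness; the input
# size and config values below are illustrative assumptions.
#
# config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
# backbone = MaskFormerSwinBackbone(config)
# feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
# # one feature map per requested stage, each of shape (1, C_stage, H_stage, W_stage)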
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text) for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
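# Example run (requires network access):
#
#   >>> movies = get_imdb_top_250_movies()
#   >>> write_movies("IMDb_Top_250_Movies.csv")
#
# Note that IMDb's markup changes over time, so the selectors above may need
# updating for the current chart page.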
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
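# Worked example for solution(): the first Fibonacci term with three digits is
# F(12) = 144 (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144), so:
#
#   assert solution(3) == 12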
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` unit squares can be tiled with unit
    squares and blocks of length 2, 3 and 4 (as in Project Euler problem 117).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
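# Worked example for the recurrence above: for length = 5 the table builds up as
# ways_number = [1, 1, 2, 4, 8, 15], matching the tetranacci-style relation
# ways(n) = ways(n-1) + ways(n-2) + ways(n-3) + ways(n-4) for n >= 4,
# e.g. ways(5) = 8 + 4 + 2 + 1 = 15.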
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


# NOTE: class and method names were lost during extraction; they are restored
# below following the standard transformers image-processor test layout.
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "crop_pct"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
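# Typical use of the image processor under test (the checkpoint name is an
# illustrative assumption):
#
# from transformers import PoolFormerImageProcessor
# from PIL import Image
#
# image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# inputs = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
# # inputs.pixel_values has shape (1, 3, crop_size["height"], crop_size["width"])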
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A_ = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


# NOTE: class and method names were lost during extraction; they are restored
# below following the standard transformers image-processor test layout.
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler distance: a string similarity in [0.0, 1.0], where 1.0 means
    the strings are identical. A bonus is added for a common prefix.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # consume the matched character so it can't be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
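# Example values (standard Jaro-Winkler behaviour):
#
#   jaro_winkler("martha", "marhta")  # ~0.9611 (one transposition, 3-char common prefix)
#   jaro_winkler("hello", "hello")    # 1.0
#   jaro_winkler("hello", "world")    # ~0.4667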
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __a = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models snake_case_ :List[Any] = """lm_head""" snake_case_ :Union[str, Any] = getattr(_lowercase, _lowercase ) if weight_type is not None: snake_case_ :str = getattr(_lowercase, _lowercase ).shape else: snake_case_ :Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ :Any = value elif weight_type == "weight_g": snake_case_ :Tuple = value elif weight_type == "weight_v": snake_case_ :Optional[int] = value elif weight_type == "bias": snake_case_ :Tuple = value else: snake_case_ :List[Any] = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = [] snake_case_ :Union[str, Any] = fairseq_model.state_dict() snake_case_ :str = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): snake_case_ :Dict = False if "conv_layers" in name: load_conv_layer( _lowercase, _lowercase, _lowercase, _lowercase, hf_model.config.feat_extract_norm == """group""", ) snake_case_ :Dict = True else: for key, mapped_key in MAPPING.items(): snake_case_ :List[str] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ :List[str] = True if "*" in mapped_key: snake_case_ :Tuple = name.split(_lowercase )[0].split(""".""" )[-2] snake_case_ :Tuple = mapped_key.replace("""*""", _lowercase ) if "weight_g" in name: snake_case_ :Dict = """weight_g""" elif "weight_v" in name: snake_case_ :Dict = """weight_v""" elif "bias" in name: snake_case_ :Optional[Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ :List[str] = """weight""" else: snake_case_ :Optional[Any] = None set_recursively(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = full_name.split("""conv_layers.""" )[-1] snake_case_ :List[str] = name.split(""".""" ) snake_case_ :Any = int(items[0] ) snake_case_ :str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ :Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ :List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case_ :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowercase ) @torch.no_grad() def A_ ( _lowercase, _lowercase, _lowercase=None, _lowercase=None, _lowercase=True ): '''simple docstring''' if config_path is not None: snake_case_ :str = UniSpeechConfig.from_pretrained(_lowercase ) else: snake_case_ :Tuple = UniSpeechConfig() if is_finetuned: if dict_path: snake_case_ :Optional[int] = Dictionary.load_from_json(_lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ :Optional[int] = target_dict.pad_index snake_case_ :Optional[int] = target_dict.bos_index snake_case_ :Dict = target_dict.eos_index snake_case_ :List[str] = len(target_dict.symbols ) snake_case_ :int = os.path.join(_lowercase, """vocab.json""" ) if not os.path.isdir(_lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowercase ) ) return os.makedirs(_lowercase, exist_ok=_lowercase ) snake_case_ :List[str] = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ :Optional[Any] = 42 snake_case_ :List[Any] = 43 with open(_lowercase, """w""", encoding="""utf-8""" ) as vocab_handle: json.dump(_lowercase, _lowercase ) snake_case_ :Union[str, Any] = WavaVecaPhonemeCTCTokenizer( _lowercase, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=_lowercase, ) snake_case_ :List[Any] = True if config.feat_extract_norm == """layer""" else False snake_case_ :Any = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_lowercase, return_attention_mask=_lowercase, ) snake_case_ :str = WavaVecaProcessor(feature_extractor=_lowercase, tokenizer=_lowercase ) processor.save_pretrained(_lowercase ) snake_case_ :Optional[Any] = UniSpeechForCTC(_lowercase ) else: snake_case_ :str = UniSpeechForPreTraining(_lowercase ) if is_finetuned: snake_case_, snake_case_, snake_case_ :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: snake_case_, snake_case_, snake_case_ :Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) snake_case_ :Any = model[0].eval() recursively_load_weights(_lowercase, _lowercase, _lowercase ) hf_unispeech.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a 
fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
66
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    # hf_hub_url builds the resolve URL for a dataset file; blanks in `path` must be percent-encoded
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
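A quick usage sketch of `hf_hub_url` outside pytest, assuming the same `datasets.utils.hub` API exercised by the test above; the repo id and filename are invented for illustration:

from urllib.parse import quote

from datasets.utils.hub import hf_hub_url

# hypothetical inputs; spaces in the path are percent-encoded by quote()
url = hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
assert url == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"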
329
0
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate __UpperCAmelCase =TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) __UpperCAmelCase =[] __UpperCAmelCase =[] __UpperCAmelCase ={"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} __UpperCAmelCase =[ { "type": "header", "text": { "type": "plain_text", "text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', "emoji": True, }, } ] __UpperCAmelCase =0 for log in Path().glob("*.log"): __UpperCAmelCase =0 with open(log, "r") as f: for line in f: __UpperCAmelCase =json.loads(line) if line.get("nodeid", "") != "": __UpperCAmelCase =line["nodeid"] if line.get("duration", None) is not None: __UpperCAmelCase =f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) __UpperCAmelCase =[] log.unlink() __UpperCAmelCase ="" __UpperCAmelCase =[] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" __UpperCAmelCase =[] __UpperCAmelCase ={} for test in failed_tests: __UpperCAmelCase =test[0].split("::") __UpperCAmelCase =data[0].split("/")[-1] if data[0] not in filesafailed: __UpperCAmelCase =[data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) __UpperCAmelCase =[test[0] for test in failed_table] __UpperCAmelCase =list(set(files)) # Count number of instances in failed_tests __UpperCAmelCase =[] for file in individual_files: table.append([file, len(filesafailed[file])]) __UpperCAmelCase =tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: __UpperCAmelCase ="Too many failed tests, please see the full report in the Action results." __UpperCAmelCase =len(err) + 1_0 __UpperCAmelCase =message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}' print(f'### {message}') else: __UpperCAmelCase ="No failed tests! 🤗" print(f'## {message}') payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient __UpperCAmelCase =WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": __UpperCAmelCase ={ "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) __UpperCAmelCase ={ "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) __UpperCAmelCase ={ "type": "context", "elements": [ { "type": "plain_text", "text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) __UpperCAmelCase =client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) __UpperCAmelCase =response.data["ts"] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name __UpperCAmelCase ="" for i, row in enumerate(test_failures): if row[0] != test_class: __UpperCAmelCase =row[0] else: __UpperCAmelCase ="" __UpperCAmelCase ={ "type": "section", "text": { "type": "mrkdwn", "text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
67
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
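A minimal sketch of calling `require_version` directly, using the same `transformers.utils.versions` API imported above; the pinned requirement string is an example only:

from transformers.utils.versions import require_version

# raises ImportError (with the hint appended) if the installed version does not satisfy the spec
require_version("tqdm>=4.27", "Try: pip install -U tqdm")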
329
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase__ = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase__ = { """google/realm-cc-news-pretrained-embedder""": 5_1_2, """google/realm-cc-news-pretrained-encoder""": 5_1_2, """google/realm-cc-news-pretrained-scorer""": 5_1_2, """google/realm-cc-news-pretrained-openqa""": 5_1_2, """google/realm-orqa-nq-openqa""": 5_1_2, """google/realm-orqa-nq-reader""": 5_1_2, """google/realm-orqa-wq-openqa""": 5_1_2, """google/realm-orqa-wq-reader""": 5_1_2, } lowerCAmelCase__ = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": 
{"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_INIT_CONFIGURATION __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = RealmTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> List[str]: '''simple docstring''' super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , lowercase ) != do_lower_case or normalizer_state.get("strip_accents" , lowercase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars ): A__ = getattr(lowercase , normalizer_state.pop("type" ) ) A__ = do_lower_case A__ = strip_accents A__ = tokenize_chinese_chars A__ = normalizer_class(**lowercase ) A__ = do_lower_case def UpperCamelCase ( self , lowercase , **lowercase ) -> Optional[int]: '''simple docstring''' A__ = PaddingStrategy.MAX_LENGTH A__ = text A__ = kwargs.pop("text_pair" , lowercase ) A__ = kwargs.pop("return_tensors" , lowercase ) A__ = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(lowercase ): if batch_text_pair is not None: A__ = batch_text_pair[idx] else: A__ = None A__ = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase ) A__ = encoded_candidates.get("input_ids" ) A__ = encoded_candidates.get("attention_mask" ) A__ = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(lowercase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(lowercase ) A__ = {key: item for key, item in output_data.items() if len(lowercase ) != 0} return BatchEncoding(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase=None ) -> str: '''simple docstring''' A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]: '''simple docstring''' A__ = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
68
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of vertices reachable from `start`."""
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
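For contrast with the two "Differences from BFS" notes above, a minimal breadth-first sketch over the same graph: a FIFO queue replaces the stack, and neighbours are marked explored before they are popped.

from collections import deque


def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # FIFO: oldest frontier vertex first
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored


print(breadth_first_search(G, "A"))  # same reachable set as DFS, different visit order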
329
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None: warnings.warn( 'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use MobileViTImageProcessor instead.', lowerCAmelCase__, ) super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
69
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
329
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: int = KandinskyVaaImgaImgPipeline _lowercase: List[str] = ['''image_embeds''', '''negative_image_embeds''', '''image'''] _lowercase: Optional[int] = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] _lowercase: Tuple = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _lowercase: List[str] = False @property def lowercase__ ( self : str ) -> List[str]: return 32 @property def lowercase__ ( self : Optional[int] ) -> List[Any]: return 32 @property def lowercase__ ( self : Tuple ) -> str: return self.time_input_dim @property def lowercase__ ( self : Any ) -> Optional[int]: return self.time_input_dim * 4 @property def lowercase__ ( self : int ) -> Optional[Any]: return 1_00 @property def lowercase__ ( self : int ) -> Dict: torch.manual_seed(0 ) _lowerCAmelCase = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase = UNetaDConditionModel(**__snake_case ) return model @property def lowercase__ ( self : Union[str, Any] ) -> Tuple: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Dict ) -> str: torch.manual_seed(0 ) _lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Optional[int] ) -> Optional[int]: _lowerCAmelCase = self.dummy_unet _lowerCAmelCase = self.dummy_movq _lowerCAmelCase = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _lowerCAmelCase = DDIMScheduler(**__snake_case ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : int , 
__snake_case : List[str] , __snake_case : List[Any]=0 ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __snake_case ) # create init_image _lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case ) _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(__snake_case ) else: _lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _lowerCAmelCase = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : str ) -> Tuple: _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**__snake_case ) _lowerCAmelCase = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) ) _lowerCAmelCase = output.images _lowerCAmelCase = pipe( **self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0] _lowerCAmelCase = image[0, -3:, -3:, -1] _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def lowercase__ ( self : Any ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : int ) -> Dict: _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase = """A red cartoon frog, 4k""" _lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__snake_case ) _lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) _lowerCAmelCase = pipeline.to(__snake_case ) pipeline.set_progress_bar_config(disable=__snake_case ) _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase = pipe_prior( __snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase = pipeline( image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , 
num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) _lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
70
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
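A minimal usage sketch of this pipeline through the high-level `pipeline` factory; the checkpoint name is an assumption (any Hub image-to-text model works), and the printed output is illustrative:

from transformers import pipeline

# "nlpconnect/vit-gpt2-image-captioning" is one publicly available captioning checkpoint (assumption)
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': 'two birds standing next to each other'}]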
329
0
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in a non-negative integer
    using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # `number & (number - 1)` clears the lowest set bit, so the loop
        # runs once per set bit instead of once per bit position (never 32 times).
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
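A short trace of the `number &= number - 1` trick on 13 (binary 1101), showing one iteration per set bit:

number = 0b1101  # 13, three set bits
while number:
    cleared = number & (number - 1)  # drops the lowest set bit
    print(f"{number:04b} -> {cleared:04b}")
    number = cleared
# 1101 -> 1100
# 1100 -> 1000
# 1000 -> 0000   (three iterations, so the count is 3)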
71
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
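In `diffusers` this helper is exported as `deprecate`; a minimal sketch of the keyword-argument path, with an invented legacy argument name:

# `seed` is a made-up legacy argument used only for illustration
kwargs = {"seed": 42}
seed = deprecate("seed", "1.0.0", "Pass a `generator` instead.", take_from=kwargs)
# emits: FutureWarning: The `seed` argument is deprecated and will be removed in version 1.0.0. Pass a `generator` instead.
# seed == 42, and "seed" has been popped out of kwargs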
329
0
"""simple docstring""" from typing import Any def snake_case_ ( A_ : list ): '''simple docstring''' if not input_list: return [] _lowerCamelCase : Dict = [input_list.count(A_ ) for value in input_list] _lowerCamelCase : Union[str, Any] = max(A_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(A_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
72
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when `num_picked` balls are drawn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
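The computation follows from linearity of expectation: each of the 7 colours is missing from a 20-ball draw with probability C(60,20)/C(70,20), since all 20 balls must then come from the other 60. As a sanity check:

E[\text{distinct colours}] = 7\left(1 - \frac{\binom{60}{20}}{\binom{70}{20}}\right) \approx 6.818741802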
329
0
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ="""▁""" a ={"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} a ={ """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } a ={ """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } a ={ """ernie-m-base""": 514, """ernie-m-large""": 514, } a ={ """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = ["input_ids"] _UpperCAmelCase : Dict = VOCAB_FILES_NAMES _UpperCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : List[Any] = RESOURCE_FILES_NAMES def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="utf8" ,SCREAMING_SNAKE_CASE__ : List[Any]="[UNK]" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[PAD]" ,SCREAMING_SNAKE_CASE__ : List[Any]="[CLS]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,vocab_file=SCREAMING_SNAKE_CASE__ ,encoding=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Tuple = do_lower_case __lowerCamelCase : Any = sentencepiece_model_ckpt __lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(SCREAMING_SNAKE_CASE__) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: __lowerCamelCase : Union[str, Any] = self.load_vocab(filepath=SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : List[Any] = {self.sp_model.id_to_piece(SCREAMING_SNAKE_CASE__): id for id in range(self.sp_model.get_piece_size())} __lowerCamelCase : Any = {v: k for k, v in self.vocab.items()} def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str]): if text is None: return None __lowerCamelCase : Optional[int] = self.tokenize(SCREAMING_SNAKE_CASE__) __lowerCamelCase , __lowerCamelCase : Optional[Any] = '', [] for i, ch in enumerate(SCREAMING_SNAKE_CASE__): if ch in self.SP_CHAR_MAPPING: __lowerCamelCase : Union[str, Any] = self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : Optional[int] = unicodedata.normalize('NFKC' ,SCREAMING_SNAKE_CASE__) if self.is_whitespace(SCREAMING_SNAKE_CASE__): continue normalized_text += ch char_mapping.extend([i] * len(SCREAMING_SNAKE_CASE__)) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = normalized_text, [], 0 if self.do_lower_case: __lowerCamelCase : Optional[Any] = text.lower() for token in split_tokens: if token[:1] == "▁": __lowerCamelCase : Any = token[1:] __lowerCamelCase : Union[str, Any] = text[offset:].index(SCREAMING_SNAKE_CASE__) + offset __lowerCamelCase : Tuple = start + len(SCREAMING_SNAKE_CASE__) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1)) __lowerCamelCase : List[Any] = end return token_mapping @property def lowerCAmelCase ( self : Optional[Any]): return len(self.vocab) def lowerCAmelCase ( self : Any): return dict(self.vocab ,**self.added_tokens_encoder) def __getstate__( self : int): __lowerCamelCase : List[str] = self.__dict__.copy() __lowerCamelCase : List[str] = None return state def __setstate__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : int = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs'): __lowerCamelCase : int = {} __lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.sentencepiece_model_ckpt) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return "".join((self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for c in text)) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Dict=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1): if self.sp_model_kwargs.get('enable_sampling') is True: __lowerCamelCase : Optional[int] = True if self.sp_model_kwargs.get('alpha') is not None: __lowerCamelCase : Any = self.sp_model_kwargs.get('alpha') if self.sp_model_kwargs.get('nbest_size') is not None: __lowerCamelCase : Tuple = self.sp_model_kwargs.get('nbest_size') if not 
enable_sampling: __lowerCamelCase : Dict = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : Optional[int] = self.sp_model.SampleEncodeAsPieces(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = [] for pi, piece in enumerate(SCREAMING_SNAKE_CASE__): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(SCREAMING_SNAKE_CASE__) and pi != 0: new_pieces.append(SCREAMING_SNAKE_CASE__) continue else: continue __lowerCamelCase : List[Any] = 0 for i, chunk in enumerate(SCREAMING_SNAKE_CASE__): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(SCREAMING_SNAKE_CASE__) or self.is_punct(SCREAMING_SNAKE_CASE__): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) new_pieces.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) __lowerCamelCase : Any = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) __lowerCamelCase : Dict = i if len(SCREAMING_SNAKE_CASE__) > lst_i: new_pieces.append(piece[lst_i:]) return new_pieces def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int): __lowerCamelCase : List[str] = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip() return out_string def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int): __lowerCamelCase : str = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip() return out_string def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.vocab.get(SCREAMING_SNAKE_CASE__ ,self.vocab.get(self.unk_token)) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[str]): return self.reverse_vocab.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase : Dict = [self.cls_token_id] __lowerCamelCase : Dict = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int=None): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=False): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): # called when `add_special_tokens` is True, so align with 
`build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(SCREAMING_SNAKE_CASE__) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(SCREAMING_SNAKE_CASE__) + 1) + [1] * (len(SCREAMING_SNAKE_CASE__) + 3) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]): if "\u4e00" <= char <= "\u9fff": return True return False def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any): if char in ",;:.?!~,;:。?!《》【】": return True return False def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str): if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(SCREAMING_SNAKE_CASE__) == 1: __lowerCamelCase : int = unicodedata.category(SCREAMING_SNAKE_CASE__) if cat == "Zs": return True return False def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Any = {} with io.open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f: for index, line in enumerate(SCREAMING_SNAKE_CASE__): __lowerCamelCase : int = line.rstrip('\n') __lowerCamelCase : str = int(SCREAMING_SNAKE_CASE__) return token_to_idx def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): __lowerCamelCase : List[Any] = 0 if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : str = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items() ,key=lambda SCREAMING_SNAKE_CASE__: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Dict = token_index writer.write(token + '\n') index += 1 __lowerCamelCase : Any = os.path.join(SCREAMING_SNAKE_CASE__ ,'sentencepiece.bpe.model') with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi: __lowerCamelCase : Dict = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__) return (vocab_file,)
73
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
0
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class lowerCAmelCase_ : '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ) -> Optional[int]: raise NotImplementedError() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: raise NotImplementedError() class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : "AutoTokenizer" ,A_ : bool = False ,**A_ : Optional[int] ) -> Optional[Any]: A = tokenizer A = skip_prompt A = decode_kwargs # variables used in the streaming process A = [] A = 0 A = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ) -> int: if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('TextStreamer only supports batch size 1' ) elif len(value.shape ) > 1: A = value[0] if self.skip_prompt and self.next_tokens_are_prompt: A = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) A = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('\n' ): A = text[self.print_len :] A = [] A = 0 # If the last token is a CJK character, we print the characters. elif len(A_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): A = text[self.print_len :] self.print_len += len(A_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: A = text[self.print_len : text.rfind(' ' ) + 1] self.print_len += len(A_ ) self.on_finalized_text(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: # Flush the cache, if it exists if len(self.token_cache ) > 0: A = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) A = text[self.print_len :] A = [] A = 0 else: A = '' A = True self.on_finalized_text(A_ ,stream_end=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : bool = False ) -> List[str]: print(A_ ,flush=A_ ,end='' if not stream_end else None ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int] ) -> Dict: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Dict ,A_ : "AutoTokenizer" ,A_ : bool = False ,A_ : Optional[float] = None ,**A_ : Tuple ) -> Union[str, Any]: super().__init__(A_ ,A_ ,**A_ ) A = Queue() A = None A = timeout def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : bool = False ) -> Any: self.text_queue.put(A_ ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self : Optional[int] ) -> Tuple: return self def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
74
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        config_encoder_attention_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        config_encoder_attention_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        config_encoder_attention_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][config_encoder_attention_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][config_encoder_attention_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][config_encoder_attention_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][config_encoder_attention_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][config_encoder_attention_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][config_encoder_attention_name][
        "relative_attention_bias"
    ]["embedding"] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][config_encoder_attention_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
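For a quick smoke test of the converted folder, the checkpoint can be loaded back through the Flax auto classes. A minimal sketch, assuming the conversion above targeted a plain t5-base config; the output directory name and tokenizer choice are illustrative, not part of the script:

# Post-conversion sanity check (directory name and checkpoint choice are assumptions).
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("./flax-t5-base")

inputs = tokenizer("translate English to German: Hello, world!", return_tensors="np")
outputs = model.generate(inputs["input_ids"], max_length=32)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))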
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        # Control points in the xy plane that determine the shape of the curve.
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
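Because plot_curve opens a matplotlib window, a headless check of the math is useful: for a degree-1 curve the Bernstein basis at t = 0.5 is [0.5, 0.5], so the curve point is the midpoint of the two control points.

# Headless check of basis_function / bezier_curve_function (no plotting).
curve = BezierCurve([(1, 2), (3, 5)])
assert curve.basis_function(0.5) == [0.5, 0.5]
assert curve.bezier_curve_function(0.5) == (2.0, 3.5)  # midpoint of (1, 2) and (3, 5)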
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
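The attribute_map above is what lets the canonical config names resolve to CTRL's historical attribute names; a small sketch of that behavior (CTRLConfig is the top-level transformers export of this class):

# attribute_map redirects canonical names to the CTRL-specific ones.
from transformers import CTRLConfig

config = CTRLConfig(n_embd=512, n_layer=12)
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 12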
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
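A hedged usage sketch for the composite config: from_encoder_decoder_configs flags the decoder for cross-attention and nests both sub-configs; ViT and BERT are arbitrary example choices here, not requirements of the class.

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.encoder.model_type, config.decoder.model_type)  # vit bert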
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
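All of the expected_height/expected_width assertions in these tests derive from the shortest-edge resize rule in get_expected_values; a standalone sketch of that rule follows (the longest_edge cap of 1333 is omitted here for brevity):

# Shortest-edge resizing: scale the shorter side to `shortest_edge`,
# keep the aspect ratio for the longer side.
def expected_resize(width: int, height: int, shortest_edge: int = 18) -> tuple[int, int]:
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if height < width:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge


print(expected_resize(30, 400))  # (18, 240)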
"""simple docstring""" import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def a_ ( _lowerCAmelCase : np.ndarray ): '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Tuple = np.nan for i in range(_lowerCAmelCase ): lowercase__ : str = features[:, labels == i] lowercase__ : Optional[int] = data.mean(1 ) # Centralize the data of class i lowercase__ : List[Any] = data - column_reshape(_lowerCAmelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_lowerCAmelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : List[Any] = np.dot(_lowerCAmelCase , centered_data.T ) return covariance_sum / features.shape[1] def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Dict = features.mean(1 ) lowercase__ : Any = np.nan for i in range(_lowerCAmelCase ): lowercase__ : Any = features[:, labels == i] lowercase__ : Optional[int] = data.shape[1] lowercase__ : str = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase ) , (column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : Optional[Any] = device_data * np.dot( column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase ) , (column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase )).T , ) return covariance_sum / features.shape[1] def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ): '''simple docstring''' if features.any(): lowercase__ : str = features.mean(1 ) # Center the dataset lowercase__ : Any = features - np.reshape(_lowerCAmelCase , (data_mean.size, 1) ) lowercase__ : List[str] = np.dot(_lowerCAmelCase , centered_data.T ) / features.shape[1] lowercase__ , lowercase__ : Dict = np.linalg.eigh(_lowerCAmelCase ) # Take all the columns in the reverse order (-1), and then takes only the first lowercase__ : int = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase__ : Optional[int] = np.dot(filtered_eigenvectors.T , _lowerCAmelCase ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_lowerCAmelCase ) logging.error('Dataset empty' ) raise AssertionError def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowercase__ , lowercase__ : List[str] = eigh( covariance_between_classes(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , covariance_within_classes(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , ) lowercase__ : Optional[int] = eigenvectors[:, ::-1][:, :dimensions] lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = np.linalg.svd(_lowerCAmelCase ) lowercase__ : List[Any] = svd_matrix[:, 0:dimensions] lowercase__ : Any = np.dot(filtered_svd_matrix.T , _lowerCAmelCase ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , 
force=_lowerCAmelCase ) logging.error('Dataset empty' ) raise AssertionError def a_ ( ): '''simple docstring''' lowercase__ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowercase__ : Optional[int] = np.array([0, 0, 0, 1, 1] ) lowercase__ : Optional[Any] = 2 lowercase__ : Tuple = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_lowerCAmelCase ) as error_info: lowercase__ : Any = linear_discriminant_analysis( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if isinstance(_lowerCAmelCase , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def a_ ( ): '''simple docstring''' lowercase__ : List[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowercase__ : Optional[Any] = 2 lowercase__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_lowerCAmelCase ) as error_info: lowercase__ : Optional[Any] = principal_component_analysis(_lowerCAmelCase , _lowerCAmelCase ) if not np.allclose(_lowerCAmelCase , _lowerCAmelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
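A tiny end-to-end run of the PCA helper above (columns are samples, matching the features.shape[1] convention used throughout); the data values are illustrative only:

import numpy as np

features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.1, 5.9, 8.2]])  # shape (n_features, n_samples)
projected = principal_component_analysis(features, 1)
print(projected.shape)  # (1, 4): four samples reduced to one principal component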
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Tuple=13 , lowercase_ :Union[str, Any]=7 , lowercase_ :Optional[Any]=True , lowercase_ :Optional[int]=True , lowercase_ :str=False , lowercase_ :Dict=True , lowercase_ :Optional[int]=99 , lowercase_ :List[str]=32 , lowercase_ :List[Any]=5 , lowercase_ :int=4 , lowercase_ :str=37 , lowercase_ :Optional[int]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :Optional[int]=0.1 , lowercase_ :Tuple=5_12 , lowercase_ :Optional[int]=16 , lowercase_ :Optional[int]=2 , lowercase_ :int=0.02 , lowercase_ :Union[str, Any]=3 , lowercase_ :Tuple=4 , lowercase_ :Any=None , ) -> str: UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self :int ) -> Optional[Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self :List[Any] , lowercase_ :int , lowercase_ :Union[str, Any] , lowercase_ :List[str] , lowercase_ 
:List[Any] , lowercase_ :Tuple , lowercase_ :Dict , lowercase_ :Union[str, Any] ) -> int: UpperCAmelCase = LlamaModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ ) UpperCAmelCase = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self :Dict , lowercase_ :Dict , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Optional[int] , lowercase_ :int , lowercase_ :Dict , lowercase_ :str , ) -> Dict: UpperCAmelCase = True UpperCAmelCase = LlamaModel(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) UpperCAmelCase = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self :Dict , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] , ) -> List[str]: UpperCAmelCase = LlamaForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self :str , lowercase_ :List[Any] , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :Optional[int] , lowercase_ :Any , ) -> List[str]: UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = LlamaForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() # first forward pass UpperCAmelCase = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] UpperCAmelCase = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] # select random slice UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) ) def UpperCAmelCase__ ( self 
:Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = LlamaModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def UpperCAmelCase__ ( self :str ) -> Optional[int]: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self :List[Any] ) -> Dict: UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> List[str]: UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase = type self.model_tester.create_and_check_model(*lowercase_ ) def UpperCAmelCase__ ( self :str ) -> List[str]: UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = input_dict['input_ids'] UpperCAmelCase = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase = LlamaForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = 'single_label_classification' UpperCAmelCase = input_dict['input_ids'] UpperCAmelCase = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase = LlamaForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self :List[str] ) -> Tuple: UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = 'multi_label_classification' UpperCAmelCase = input_dict['input_ids'] UpperCAmelCase = input_ids.ne(1 ).to(lowercase_ ) UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase = LlamaForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() 
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def UpperCAmelCase__ ( self :Dict ) -> List[str]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[int] ) -> int: UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase = LlamaModel(lowercase_ ) original_model.to(lowercase_ ) original_model.eval() UpperCAmelCase = original_model(lowercase_ ).last_hidden_state UpperCAmelCase = original_model(lowercase_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase = {'type': scaling_type, 'factor': 10.0} UpperCAmelCase = LlamaModel(lowercase_ ) scaled_model.to(lowercase_ ) scaled_model.eval() UpperCAmelCase = scaled_model(lowercase_ ).last_hidden_state UpperCAmelCase = scaled_model(lowercase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) ) @require_torch class A_ ( unittest.TestCase ): """simple docstring""" @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def UpperCAmelCase__ ( self :Any ) -> Union[str, Any]: UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] UpperCAmelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) UpperCAmelCase = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 UpperCAmelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] UpperCAmelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) UpperCAmelCase = model(torch.tensor(lowercase_ ) ) # Expected mean on dim = -1 UpperCAmelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def UpperCAmelCase__ ( self :List[str] ) -> List[str]: UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] UpperCAmelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) UpperCAmelCase = model(torch.tensor(lowercase_ ) ) # Expected mean on dim = -1 UpperCAmelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def UpperCAmelCase__ ( self :int ) -> Optional[int]: UpperCAmelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] UpperCAmelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) UpperCAmelCase = model(torch.tensor(lowercase_ ) ) UpperCAmelCase = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off UpperCAmelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Model is curently gated' ) @slow def UpperCAmelCase__ ( self :Any ) -> int: UpperCAmelCase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' UpperCAmelCase = 'Simply put, the theory of relativity states that ' UpperCAmelCase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) UpperCAmelCase = tokenizer.encode(lowercase_ , return_tensors='pt' ) UpperCAmelCase = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ ) # greedy generation outputs UpperCAmelCase = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ ) UpperCAmelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ )
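The rope_scaling dict exercised in the parameterized test above configures linear position scaling. As an assumption-level illustration, not the transformers implementation: linear scaling divides positions by the factor before the rotary angles are computed, stretching the usable context window.

import numpy as np


def rope_angles(positions: np.ndarray, dim: int = 8, base: float = 10000.0, factor: float = 1.0) -> np.ndarray:
    # Rotary angles theta[p, k] = (p / factor) / base**(2k / dim).
    inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
    return np.outer(positions / factor, inv_freq)


pos = np.arange(4.0)
# Scaling by 10 is equivalent to evaluating the unscaled angles at positions / 10.
assert np.allclose(rope_angles(pos, factor=10.0), rope_angles(pos / 10.0))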
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input. This allows us
    # to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
329
0
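# --- Illustrative sketch (not part of the dataset row above) ---
# The TFRecord-sharding script in the preceding sample serializes "input_ids"
# and "attention_mask" as Int64List features. This minimal sketch shows how
# such shards could be read back; the shard path is a hypothetical example
# mirroring the script's default output layout.
import tensorflow as tf

def parse_example(example_proto):
    # VarLenFeature tolerates any sequence length; densify for model input.
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

dataset = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])  # hypothetical path
dataset = dataset.map(parse_example).batch(8)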
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
329
0
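# --- Illustrative sketch (not part of the dataset row above) ---
# A minimal sketch, assuming torch and transformers are installed, of the
# pattern the scheduler tests in the preceding sample exercise: step a warmup
# schedule and record the learning rate before each optimizer step.
import torch
from transformers import get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()
print(lrs)  # ramps up over 2 warmup steps to 10.0, then decays linearly toward 0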
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase_ : def __init__( self , a = "cpu" , a = "openai/clip-vit-large-patch14" ): UpperCamelCase__ = device UpperCamelCase__ = CLIPTokenizerFast.from_pretrained(a ) UpperCamelCase__ = [0.4814_5466, 0.457_8275, 0.4082_1073] UpperCamelCase__ = [0.2686_2954, 0.2613_0258, 0.2757_7711] UpperCamelCase__ = torchvision.transforms.Normalize(self.image_mean , self.image_std ) UpperCamelCase__ = torchvision.transforms.Resize(2_24 ) UpperCamelCase__ = torchvision.transforms.CenterCrop(2_24 ) def __a ( self , a ): UpperCamelCase__ = self.resize(a ) UpperCamelCase__ = self.center_crop(a ) UpperCamelCase__ = self.normalize(a ) return images def __call__( self , a=None , a=None , **a ): UpperCamelCase__ = self.tokenizer(text=a , **a ) UpperCamelCase__ = self.preprocess_img(a ) UpperCamelCase__ = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase_ ( nn.Module ): def __init__( self , a=10 , a=0.01 , a=None , a=None , a=None , a=None , a=None , a=None , a=False , a=True , a="image" , a=True , a=False , a=False , a=False , ): super().__init__() UpperCamelCase__ = None UpperCamelCase__ = device if device else get_device() if vqgan: UpperCamelCase__ = vqgan else: UpperCamelCase__ = load_vqgan(self.device , conf_path=a , ckpt_path=a ) self.vqgan.eval() if clip: UpperCamelCase__ = clip else: UpperCamelCase__ = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" ) self.clip.to(self.device ) UpperCamelCase__ = ProcessorGradientFlow(device=self.device ) UpperCamelCase__ = iterations UpperCamelCase__ = lr UpperCamelCase__ = log UpperCamelCase__ = make_grid UpperCamelCase__ = return_val UpperCamelCase__ = quantize UpperCamelCase__ = self.vqgan.decoder.z_shape def __a ( self , a=None , a=None , a=5 , a=True ): UpperCamelCase__ = [] if output_path is None: UpperCamelCase__ = "./animation.gif" if input_path is None: UpperCamelCase__ = self.save_path UpperCamelCase__ = sorted(glob(input_path + "/*" ) ) if not len(a ): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(a ) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" ) UpperCamelCase__ = total_duration / len(a ) UpperCamelCase__ = [frame_duration] * len(a ) if extend_frames: UpperCamelCase__ = 1.5 UpperCamelCase__ = 3 for file_name in paths: if file_name.endswith(".png" ): images.append(imageio.imread(a ) ) imageio.mimsave(a , a , duration=a ) print(f'''gif saved to {output_path}''' ) def __a ( self , a=None , a=None ): if not (path or img): raise ValueError("Input either path or tensor" ) if img is not None: raise NotImplementedError UpperCamelCase__ = preprocess(Image.open(a ) , target_image_size=2_56 ).to(self.device ) UpperCamelCase__ = preprocess_vqgan(a ) UpperCamelCase__ , *UpperCamelCase__ = self.vqgan.encode(a ) return z def __a ( self , a ): UpperCamelCase__ = self.latent.detach().requires_grad_() UpperCamelCase__ = base_latent + transform_vector if self.quantize: UpperCamelCase__ , *UpperCamelCase__ = self.vqgan.quantize(a ) else: UpperCamelCase__ = trans_latent return 
self.vqgan.decode(a ) def __a ( self , a , a , a=None ): UpperCamelCase__ = self.clip_preprocessor(text=a , images=a , return_tensors="pt" , padding=a ) UpperCamelCase__ = self.clip(**a ) UpperCamelCase__ = clip_outputs.logits_per_image if weights is not None: UpperCamelCase__ = similarity_logits * weights return similarity_logits.sum() def __a ( self , a , a , a ): UpperCamelCase__ = self._get_clip_similarity(pos_prompts["prompts"] , a , weights=(1 / pos_prompts["weights"]) ) if neg_prompts: UpperCamelCase__ = self._get_clip_similarity(neg_prompts["prompts"] , a , weights=neg_prompts["weights"] ) else: UpperCamelCase__ = torch.tensor([1] , device=self.device ) UpperCamelCase__ = -torch.log(a ) + torch.log(a ) return loss def __a ( self , a , a , a ): UpperCamelCase__ = torch.randn_like(self.latent , requires_grad=a , device=self.device ) UpperCamelCase__ = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() UpperCamelCase__ = self._add_vector(a ) UpperCamelCase__ = loop_post_process(a ) UpperCamelCase__ = self._get_CLIP_loss(a , a , a ) print("CLIP loss" , a ) if self.log: wandb.log({"CLIP Loss": clip_loss} ) clip_loss.backward(retain_graph=a ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def __a ( self , a , a , a ): wandb.init(reinit=a , project="face-editor" ) wandb.config.update({"Positive Prompts": positive_prompts} ) wandb.config.update({"Negative Prompts": negative_prompts} ) wandb.config.update({"lr": self.lr, "iterations": self.iterations} ) if image_path: UpperCamelCase__ = Image.open(a ) UpperCamelCase__ = image.resize((2_56, 2_56) ) wandb.log("Original Image" , wandb.Image(a ) ) def __a ( self , a ): if not prompts: return [] UpperCamelCase__ = [] UpperCamelCase__ = [] if isinstance(a , a ): UpperCamelCase__ = [prompt.strip() for prompt in prompts.split("|" )] for prompt in prompts: if isinstance(a , (tuple, list) ): UpperCamelCase__ = prompt[0] UpperCamelCase__ = float(prompt[1] ) elif ":" in prompt: UpperCamelCase__ , UpperCamelCase__ = prompt.split(":" ) UpperCamelCase__ = float(a ) else: UpperCamelCase__ = prompt UpperCamelCase__ = 1.0 processed_prompts.append(a ) weights.append(a ) return { "prompts": processed_prompts, "weights": torch.tensor(a , device=self.device ), } def __a ( self , a , a=None , a=None , a=True , a=False , a=True , a=True , a=None , ): if image_path: UpperCamelCase__ = self._get_latent(a ) else: UpperCamelCase__ = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(a , a , a ) assert pos_prompts, "You must provide at least one positive prompt." 
UpperCamelCase__ = self.process_prompts(a ) UpperCamelCase__ = self.process_prompts(a ) if save_final and save_path is None: UpperCamelCase__ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) ) if not os.path.exists(a ): os.makedirs(a ) else: UpperCamelCase__ = save_path + "_" + get_timestamp() os.makedirs(a ) UpperCamelCase__ = save_path UpperCamelCase__ = self.vqgan.decode(self.latent )[0] if show_intermediate: print("Original Image" ) show_pil(custom_to_pil(a ) ) UpperCamelCase__ = loop_post_process(a ) for iter, transformed_img in enumerate(self._optimize_CLIP(a , a , a ) ): if show_intermediate: show_pil(a ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({"Image": wandb.Image(a )} ) if show_final: show_pil(a ) if save_final: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
80
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
0
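# --- Illustrative sketch (not part of the dataset row above) ---
# A toy version of the block-offset key renaming used by the PoolFormer
# conversion script in the preceding sample: shift the block index embedded in
# a checkpoint key while renaming the sub-module. The key below is a
# simplified, made-up example.
def rename_with_offset(key, offset, old_name, new_name):
    parts = key.split(".")
    idx = parts.index(old_name.split(".")[0])
    block_num = int(parts[idx - 2])
    layer_num = int(parts[idx - 1])
    return key.replace(
        f"{block_num}.{layer_num}.{old_name}",
        f"block.{block_num - offset}.{layer_num}.{new_name}",
    )

print(rename_with_offset("network.3.2.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1"))
# -> network.block.2.2.output.conv1.weight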
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase_ : List[Any] = logging.get_logger(__name__) class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = ["pixel_values"] def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = True , __A = 1 / 255 , __A = None , __A = True , __A = None , __A = None , **__A , ) -> None: super().__init__(**__A ) a =size if size is not None else {'''height''': 224, '''width''': 224} a =get_size_dict(__A ) a =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} a =get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' ) a =do_resize a =do_rescale a =do_normalize a =do_center_crop a =crop_size a =size a =resample a =rescale_factor a =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN a =image_std if image_std is not None else IMAGENET_DEFAULT_STD def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ) -> np.ndarray: a =get_size_dict(__A ) if "shortest_edge" in size: a =get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: a =(size['''height'''], size['''width''']) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(__A , size=__A , resample=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: a =get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A ) -> np.ndarray: return rescale(__A , scale=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature: a =do_resize if do_resize is not None else self.do_resize a =do_rescale if do_rescale is not None else self.do_rescale a =do_normalize if do_normalize is not None else self.do_normalize a =do_center_crop if do_center_crop is not None else self.do_center_crop a =crop_size if crop_size is not None else self.crop_size a =get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A ) a =resample if resample is not None else self.resample a =rescale_factor if rescale_factor is not None else self.rescale_factor a =image_mean if image_mean is not None else self.image_mean a =image_std if image_std is not None else self.image_std a =size if size is not None else self.size a =get_size_dict(__A ) if not is_batched(__A ): a =[images] if not valid_images(__A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. a =[to_numpy_array(__A ) for image in images] if do_resize: a =[self.resize(image=__A , size=__A , resample=__A ) for image in images] if do_center_crop: a =[self.center_crop(image=__A , size=__A ) for image in images] if do_rescale: a =[self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: a =[self.normalize(image=__A , mean=__A , std=__A ) for image in images] a =[to_channel_dimension_format(__A , __A ) for image in images] a ={'''pixel_values''': images} return BatchFeature(data=__A , tensor_type=__A )
81
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
329
0
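# --- Illustrative sketch (not part of the dataset row above) ---
# A minimal sketch of the batch-unrolling trick in the pipeline iterators
# from the preceding sample: slice one item out of a batched tensor but keep
# a leading batch dimension of 1, so downstream code still sees "a batch".
import torch

batch = {"logits": torch.randn(4, 7)}  # pretend model output with batch_size=4
for i in range(4):
    item = {k: v[i].unsqueeze(0) for k, v in batch.items()}
    assert item["logits"].shape == (1, 7)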
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __lowerCAmelCase : __lowerCamelCase = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) __lowerCamelCase = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) __lowerCamelCase = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) __lowerCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) __lowerCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments,) ) ((_lowerCAmelCase) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: _lowerCAmelCase = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: _lowerCAmelCase = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: _lowerCAmelCase = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: _lowerCAmelCase = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=snake_case , decoder_config=snake_case , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens _lowerCAmelCase = decoder_config.decoder_start_token_id _lowerCAmelCase = decoder_config.pad_token_id if decoder_start_token_id is None: _lowerCAmelCase = decoder_config.bos_token_id if pad_token_id is None: _lowerCAmelCase = decoder_config.eos_token_id # This is necessary to make Flax's generate() work _lowerCAmelCase = decoder_config.eos_token_id _lowerCAmelCase = decoder_start_token_id _lowerCAmelCase = pad_token_id _lowerCAmelCase = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) _lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) _lowerCAmelCase = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
82
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ :int = logging.get_logger(__name__) lowerCAmelCase__ :Optional[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class __a ( UpperCAmelCase ): _a : str = 'data2vec-text' def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class __a ( UpperCAmelCase ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
329
0
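# --- Illustrative sketch (not part of the dataset row above) ---
# A minimal sketch of the HfArgumentParser pattern used by the Flax
# vision-encoder-decoder script in the preceding sample: CLI arguments parsed
# into a dataclass. The field names and default checkpoint are illustrative.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class Args:
    output_dir: str = field(metadata={"help": "Where to write the model."})
    encoder: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Encoder checkpoint."},
    )

(args,) = HfArgumentParser((Args,)).parse_args_into_dataclasses()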
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__ ( lowercase ): lowercase__ = ["""image_processor""", """tokenizer"""] lowercase__ = """ViTImageProcessor""" lowercase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : Optional[int] ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Optional[Any]=None ,**lowerCamelCase__ : Optional[Any] ): '''simple docstring''' _UpperCamelCase : int = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' ,lowerCamelCase__ ,) _UpperCamelCase : List[Any] = kwargs.pop('feature_extractor' ) _UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCamelCase__ ,lowerCamelCase__ ) def __call__( self : List[Any] ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : List[Any]=None ,**lowerCamelCase__ : List[str] ): '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError('You have to specify either text, visual prompt or images.' ) if text is not None and visual_prompt is not None: raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' ) if text is not None: _UpperCamelCase : Optional[int] = self.tokenizer(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) if visual_prompt is not None: _UpperCamelCase : Any = self.image_processor(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) if images is not None: _UpperCamelCase : Union[str, Any] = self.image_processor(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) if visual_prompt is not None and images is not None: _UpperCamelCase : Dict = { 'pixel_values': image_features.pixel_values, 'conditional_pixel_values': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: _UpperCamelCase : Optional[int] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: _UpperCamelCase : Dict = { 'conditional_pixel_values': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**lowerCamelCase__ ) ,tensor_type=lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : int ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ ) @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,lowerCamelCase__ ,) return self.image_processor_class @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,lowerCamelCase__ ,) return self.image_processor
83
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
0
"""simple docstring""" from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) __UpperCAmelCase = 2_99_79_24_58 # Symbols __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = symbols('ct x y z') def _snake_case ( lowercase__ : float ) -> float: '''simple docstring''' if velocity > c: raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("""Speed must be greater than or equal to 1!""" ) return velocity / c def _snake_case ( lowercase__ : float ) -> float: '''simple docstring''' return 1 / sqrt(1 - beta(lowercase__ ) ** 2 ) def _snake_case ( lowercase__ : float ) -> np.ndarray: '''simple docstring''' return np.array( [ [gamma(lowercase__ ), -gamma(lowercase__ ) * beta(lowercase__ ), 0, 0], [-gamma(lowercase__ ) * beta(lowercase__ ), gamma(lowercase__ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def _snake_case ( lowercase__ : float , lowercase__ : np.ndarray | None = None ) -> np.ndarray: '''simple docstring''' if event is None: lowerCAmelCase_ :Tuple = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase__ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: __UpperCAmelCase = transform(29_97_92_45) print('Example of four vector: ') print(F"""ct' = {four_vector[0]}""") print(F"""x' = {four_vector[1]}""") print(F"""y' = {four_vector[2]}""") print(F"""z' = {four_vector[3]}""") # Substitute symbols with numerical values __UpperCAmelCase = {ct: c, x: 1, y: 1, z: 1} __UpperCAmelCase = [four_vector[i].subs(sub_dict) for i in range(4)] print(F"""\n{numerical_vector}""")
84
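For reference, the transformation matrix built in the style_context above is the standard Lorentz boost along the x-axis; with $\beta = v/c$ and $\gamma = 1/\sqrt{1-\beta^2}$ it acts on the four-vector $x = (ct, x, y, z)^T$ as

$$\Lambda = \begin{pmatrix} \gamma & -\gamma\beta & 0 & 0 \\ -\gamma\beta & \gamma & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}, \qquad x'^{\mu} = \Lambda^{\mu}{}_{\nu}\, x^{\nu},$$

which is exactly the array the code assembles from its gamma and beta helpers.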
from collections.abc import Generator


def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = 0, 1
    while True:
        _UpperCAmelCase , _UpperCAmelCase = b, a + b
        yield b


def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    _UpperCAmelCase = 1
    _UpperCAmelCase = fibonacci_generator()
    while len(str(next(a__ ) ) ) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
329
0
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
85
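A minimal self-contained sketch of the digit-counting solution in the row above (the function name is mine, and the Project Euler 25 convention F1 = F2 = 1 is assumed):

def first_fib_index_with_n_digits(n: int = 1000) -> int:
    # Walk the Fibonacci sequence until a term reaches n decimal digits.
    a, b = 1, 1
    index = 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index


assert first_fib_index_with_n_digits(3) == 12  # F12 = 144 is the first 3-digit term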
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
329
0
"""simple docstring""" from __future__ import annotations def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = [] __lowerCAmelCase , __lowerCAmelCase : Tuple = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __lowerCAmelCase : Union[str, Any] = result + left + right return input_list def __lowerCAmelCase (_UpperCamelCase ): if len(_UpperCamelCase ) <= 1: return input_list __lowerCAmelCase : List[str] = list(_UpperCamelCase ) # iteration for two-way merging __lowerCAmelCase : Optional[int] = 2 while p <= len(_UpperCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ): __lowerCAmelCase : Optional[Any] = i __lowerCAmelCase : Optional[int] = i + p - 1 __lowerCAmelCase : Tuple = (low + high + 1) // 2 __lowerCAmelCase : Union[str, Any] = merge(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # final merge of last two parts if p * 2 >= len(_UpperCamelCase ): __lowerCAmelCase : Dict = i __lowerCAmelCase : Tuple = merge(_UpperCamelCase , 0 , _UpperCamelCase , len(_UpperCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": lowerCamelCase__ = [] else: lowerCamelCase__ = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
86
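A quick sanity check of the bottom-up merge sort in the style_context above (assuming its obfuscated names resolve to iter_merge_sort, as in the file's own __main__ block). The merge width p doubles each pass, so [5, 3, 8, 1] becomes [3, 5, 1, 8] after the p=2 pass and [1, 3, 5, 8] after the final merge:

print(iter_merge_sort([5, 3, 8, 1, 9, 2]))  # expected: [1, 2, 3, 5, 8, 9]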
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
329
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } UpperCamelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any): for attribute in key.split("."): lowercase__ : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase) if weight_type is not None: lowercase__ : Tuple = getattr(_lowerCamelCase , _lowerCamelCase).shape else: lowercase__ : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''') if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : List[str] = value elif weight_type == "weight_v": lowercase__ : List[Any] = value elif weight_type == "bias": lowercase__ : List[str] = value elif weight_type == "running_mean": lowercase__ : Any = value elif weight_type == "running_var": lowercase__ : int = value elif weight_type == "num_batches_tracked": lowercase__ : Dict = value elif weight_type == "inv_freq": lowercase__ : Tuple = value else: lowercase__ : List[str] = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''') def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]): lowercase__ : int = [] lowercase__ : Union[str, Any] = fairseq_model.state_dict() lowercase__ : Any = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) lowercase__ : Dict = True else: for key, mapped_key in MAPPING.items(): lowercase__ : Any = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: lowercase__ : List[Any] = True if "*" in mapped_key: lowercase__ : Any = name.split(_lowerCamelCase)[0].split(".")[-2] lowercase__ : Union[str, Any] = mapped_key.replace("*" , _lowerCamelCase) if "pos_bias_u" in name: lowercase__ : Any = None elif "pos_bias_v" in name: lowercase__ : List[str] = None elif "weight_g" in name: lowercase__ : List[str] = "weight_g" elif "weight_v" in name: lowercase__ : Optional[int] = "weight_v" elif "bias" in name: lowercase__ : str = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Tuple = "weight" elif "running_mean" in name: lowercase__ : List[Any] = "running_mean" elif "inv_freq" in name: lowercase__ : Optional[int] = "inv_freq" elif "running_var" in name: lowercase__ : int = "running_var" elif "num_batches_tracked" in name: lowercase__ : Optional[Any] = "num_batches_tracked" else: lowercase__ : Optional[int] = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) continue if not is_used: unused_weights.append(_lowerCamelCase) logger.warning(f'''Unused weights: {unused_weights}''') def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]): lowercase__ : Optional[Any] = full_name.split("conv_layers.")[-1] lowercase__ : Tuple = name.split(".") lowercase__ : Optional[int] = int(items[0]) lowercase__ : int = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''') lowercase__ : Dict = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' 
{feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''') lowercase__ : List[str] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''') lowercase__ : Tuple = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''') lowercase__ : Optional[int] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(_lowerCamelCase) @torch.no_grad() def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=True): if config_path is not None: lowercase__ : List[Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish") else: lowercase__ : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase__ : str = "rotary" if is_finetuned: if dict_path: lowercase__ : List[str] = Dictionary.load(_lowerCamelCase) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase__ : Optional[Any] = target_dict.pad_index lowercase__ : Optional[int] = target_dict.bos_index lowercase__ : Tuple = target_dict.eos_index lowercase__ : Tuple = len(target_dict.symbols) lowercase__ : Union[str, Any] = os.path.join(_lowerCamelCase , "vocab.json") if not os.path.isdir(_lowerCamelCase): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase)) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase) lowercase__ : int = target_dict.indices # fairseq has the <pad> and <s> switched lowercase__ : Tuple = 0 lowercase__ : Union[str, Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8") as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase) lowercase__ : List[str] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) lowercase__ : Any = True if config.feat_extract_norm == "layer" else False lowercase__ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) lowercase__ : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase) processor.save_pretrained(_lowerCamelCase) lowercase__ : Tuple = WavaVecaConformerForCTC(_lowerCamelCase) else: lowercase__ : List[str] = WavaVecaConformerForPreTraining(_lowerCamelCase) if is_finetuned: lowercase__ , lowercase__ , lowercase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}) else: lowercase__ : List[Any] = 
argparse.Namespace(task="audio_pretraining") lowercase__ : Optional[int] = fairseq.tasks.setup_task(_lowerCamelCase) lowercase__ , lowercase__ , lowercase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase) lowercase__ : Optional[Any] = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned) hf_wavavec.save_pretrained(_lowerCamelCase) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
87
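The heart of the conversion script in the style_context above is the MAPPING table: each fairseq parameter name is rewritten to its transformers counterpart before the weight is copied. A minimal sketch of that renaming step (the helper name and the one-entry mapping are illustrative, not taken from the script):

def remap_key(name: str, mapping: dict) -> str:
    # Return the transformers-side name for a fairseq parameter, or the name unchanged.
    for fairseq_key, hf_key in mapping.items():
        if fairseq_key in name:
            return name.replace(fairseq_key, hf_key)
    return name


assert remap_key("w2v_encoder.proj.weight", {"w2v_encoder.proj": "lm_head"}) == "lm_head.weight"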
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ :Dict = logging.get_logger(__name__)

lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}


class __a ( UpperCAmelCase ):
    _a : List[str] = 'openai-gpt'
    _a : int = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
        """simple docstring"""
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = n_positions
        _UpperCAmelCase = n_embd
        _UpperCAmelCase = n_layer
        _UpperCAmelCase = n_head
        _UpperCAmelCase = afn
        _UpperCAmelCase = resid_pdrop
        _UpperCAmelCase = embd_pdrop
        _UpperCAmelCase = attn_pdrop
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = summary_type
        _UpperCAmelCase = summary_use_proj
        _UpperCAmelCase = summary_activation
        _UpperCAmelCase = summary_first_dropout
        _UpperCAmelCase = summary_proj_to_labels
        super().__init__(**_SCREAMING_SNAKE_CASE )
329
0
def a__ ( A_, A_ ):
    '''simple docstring'''
    return 1 if input_a == input_a else 0


def a__ ( ):
    '''simple docstring'''
    assert xnor_gate(0, 0 ) == 1
    assert xnor_gate(0, 1 ) == 0
    assert xnor_gate(1, 0 ) == 0
    assert xnor_gate(1, 1 ) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
88
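XNOR outputs 1 exactly when its two inputs agree, i.e. it is the complement of XOR. A one-line cross-check against the gate above (assuming the function resolves to xnor_gate, the name used in the file's own __main__ block):

for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == 1 - (a ^ b)  # XNOR == NOT XOR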
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    _UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
329
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase = logging.get_logger(__name__)

__lowerCAmelCase = {
    '''caidas/swin2sr-classicalsr-x2-64''': (
        '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
    ),
}


class __magic_name__ ( _UpperCamelCase ):
    lowerCAmelCase : List[str] = 'swin2sr'
    lowerCAmelCase : str = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self : Union[str, Any] ,_UpperCAmelCase : Tuple=64 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : Any=180 ,_UpperCAmelCase : Optional[Any]=[6, 6, 6, 6, 6, 6] ,_UpperCAmelCase : Any=[6, 6, 6, 6, 6, 6] ,_UpperCAmelCase : int=8 ,_UpperCAmelCase : Any=2.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : Tuple=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : List[str]="gelu" ,_UpperCAmelCase : Tuple=False ,_UpperCAmelCase : Optional[int]=0.02 ,_UpperCAmelCase : Union[str, Any]=1E-5 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : Tuple=1.0 ,_UpperCAmelCase : Union[str, Any]="1conv" ,_UpperCAmelCase : Tuple="pixelshuffle" ,**_UpperCAmelCase : List[str] ,):
        super().__init__(**_UpperCAmelCase )

        _a : List[str] = image_size
        _a : Dict = patch_size
        _a : Optional[int] = num_channels
        _a : Optional[int] = embed_dim
        _a : Union[str, Any] = depths
        _a : Optional[int] = len(_UpperCAmelCase )
        _a : Optional[Any] = num_heads
        _a : str = window_size
        _a : Optional[Any] = mlp_ratio
        _a : Optional[int] = qkv_bias
        _a : Tuple = hidden_dropout_prob
        _a : List[Any] = attention_probs_dropout_prob
        _a : Any = drop_path_rate
        _a : str = hidden_act
        _a : Tuple = use_absolute_embeddings
        _a : Dict = layer_norm_eps
        _a : Any = initializer_range
        _a : Optional[Any] = upscale
        _a : int = img_range
        _a : Union[str, Any] = resi_connection
        _a : int = upsampler
89
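Per the assertion in the test above, the helper expands to a datasets resolve URL with the file path percent-encoded; a self-contained illustration using values from the parametrize lists:

from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", "v2"
print(f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}")
# https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv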
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

lowerCAmelCase__ :Optional[int] = [
    '''python''',
    '''tqdm''',
    '''regex''',
    '''requests''',
    '''packaging''',
    '''filelock''',
    '''numpy''',
    '''tokenizers''',
    '''huggingface-hub''',
    '''safetensors''',
    '''accelerate''',
    '''pyyaml''',
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')


def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
    '''simple docstring'''
    require_version(deps[pkg] , a__ )
329
0
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Optional[int]:
    """simple docstring"""
    __lowerCamelCase = []
    __lowerCamelCase = []
    __lowerCamelCase = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    __lowerCamelCase = len(UpperCamelCase__ ) if (len(UpperCamelCase__ ) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8 ) ,
        'Stack'.center(UpperCamelCase__ ) ,
        'Postfix'.center(UpperCamelCase__ ) ,
        sep=' | ' ,
    )
    print('-' * (print_width * 3 + 7) )

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(UpperCamelCase__ )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(UpperCamelCase__ )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(UpperCamelCase__ ) == 0:
                stack.append(UpperCamelCase__ )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(UpperCamelCase__ ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(UpperCamelCase__ )  # push x to stack

        print(
            x.center(8 ) ,
            (''.join(UpperCamelCase__ )).ljust(UpperCamelCase__ ) ,
            (''.join(UpperCamelCase__ )).ljust(UpperCamelCase__ ) ,
            sep=' | ' ,
        )  # Output in tabular format

    while len(UpperCamelCase__ ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ' '.center(8 ) ,
            (''.join(UpperCamelCase__ )).ljust(UpperCamelCase__ ) ,
            (''.join(UpperCamelCase__ )).ljust(UpperCamelCase__ ) ,
            sep=' | ' ,
        )  # Output in tabular format

    return "".join(UpperCamelCase__ )  # return Postfix as str


def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
    """simple docstring"""
    __lowerCamelCase = list(infix[::-1] )  # reverse the infix equation

    for i in range(len(UpperCamelCase__ ) ):
        if infix[i] == "(":
            __lowerCamelCase = ')'  # change "(" to ")"
        elif infix[i] == ")":
            __lowerCamelCase = '('  # change ")" to "("

    return (infix_2_postfix(''.join(UpperCamelCase__ ) ))[::-1]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    __A = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    __A = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
90
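A worked trace of the reverse/swap/postfix/reverse trick used by the prefix converter in the style_context above (calling it by infix_2_prefix, the name used in the file's own __main__ block):

# "a+b*c"  --reverse, swap parens-->  "c*b+a"
#          --infix_2_postfix------->  "cb*a+"
#          --reverse--------------->  "+a*bc"
print(infix_2_prefix("a+b*c"))  # +a*bc (plus the table the function prints as a side effect)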
from __future__ import annotations


def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
    '''simple docstring'''
    _UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
    while stack:
        _UpperCAmelCase = stack.pop()
        explored.add(a__ )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(a__ )
    return explored


lowerCAmelCase__ :Tuple = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, '''A'''))
329
0
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=13 , lowercase_ : Union[str, Any]=30 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=3 , lowercase_ : Union[str, Any]=True , lowercase_ : int=True , lowercase_ : Tuple=32 , lowercase_ : Tuple=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Dict=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=10 , lowercase_ : Dict=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : List[str] = image_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : Optional[int] = is_training SCREAMING_SNAKE_CASE_ : Any = use_labels SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : int = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ : Any = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ : Tuple = num_patches + 1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) return config, pixel_values def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxViTModel(config=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ : str = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ : Tuple = (self.patch_size, self.patch_size) SCREAMING_SNAKE_CASE_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Any , lowercase_ : Optional[int]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Tuple = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ : Dict = FlaxViTForImageClassification(config=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images SCREAMING_SNAKE_CASE_ : int = 1 SCREAMING_SNAKE_CASE_ : List[str] = FlaxViTForImageClassification(lowercase_) SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Any = model(lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs SCREAMING_SNAKE_CASE_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxViTModelTester(self) SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : str = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) @jax.jit def model_jitted(lowercase_ : int , **lowercase_ : Optional[Any]): return model(pixel_values=lowercase_ , **lowercase_) with self.subTest('''JIT Enabled'''): SCREAMING_SNAKE_CASE_ : Tuple = model_jitted(**lowercase_).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : Optional[int] = model_jitted(**lowercase_).to_tuple() self.assertEqual(len(lowercase_) , len(lowercase_)) for jitted_output, output in zip(lowercase_ , lowercase_): self.assertEqual(jitted_output.shape , output.shape) @slow def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for model_class_name in self.all_model_classes: 
SCREAMING_SNAKE_CASE_ : List[Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''') SCREAMING_SNAKE_CASE_ : List[str] = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(lowercase_)
91
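Since the example graph G in the row above is connected, the iterative DFS reaches every vertex, so the returned set (order-free) is the whole vertex set:

# depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}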
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
329
0
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
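# A quick sanity check of p_series above; the values follow directly from the
# 1/n^p formula:
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]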
92
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
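# Illustrative use of the class above through the high-level pipeline factory;
# the checkpoint and image URL are examples, not taken from this file.
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# -> [{'generated_text': '...'}]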
329
0
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = '''ylacombe/bark-small''' lowercase_ : Any = tempfile.mkdtemp() lowercase_ : Tuple = '''en_speaker_1''' lowercase_ : Dict = '''This is a test string''' lowercase_ : List[str] = '''speaker_embeddings_path.json''' lowercase_ : Optional[Any] = '''speaker_embeddings''' def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _snake_case ( self ): """simple docstring""" lowercase_ : Any = self.get_tokenizer() lowercase_ : str = BarkProcessor(tokenizer=__SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _snake_case ( self ): """simple docstring""" lowercase_ : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase_ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _snake_case ( self ): """simple docstring""" lowercase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase_ : List[Any] = 35 lowercase_ : Union[str, Any] = 2 lowercase_ : List[Any] = 8 lowercase_ : int = { '''semantic_prompt''': np.ones(__SCREAMING_SNAKE_CASE ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase_ : Tuple = processor(text=self.input_string , voice_preset=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__SCREAMING_SNAKE_CASE , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase_ : Dict = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__SCREAMING_SNAKE_CASE , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase_ : List[str] = processor(text=self.input_string , voice_preset=self.voice_preset ) def _snake_case ( self ): """simple docstring""" lowercase_ : Tuple = self.get_tokenizer() lowercase_ : int = 
BarkProcessor(tokenizer=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = processor(text=self.input_string ) lowercase_ : List[Any] = tokenizer( self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
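# Minimal usage sketch for the processor under test (hedged: mirrors the calls
# made above, using the same checkpoint and voice preset as the test fixture).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(sorted(inputs.keys()))  # expected to include 'input_ids' and 'history_prompt'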
93
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]: '''simple docstring''' from .. import __version__ _UpperCAmelCase = take_from _UpperCAmelCase = () if not isinstance(args[0] , a__ ): _UpperCAmelCase = (args,) for attribute, version_name, message in args: if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ): raise ValueError( F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' F''' version {__version__} is >= {version_name}''' ) _UpperCAmelCase = None if isinstance(a__ , a__ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(a__ ),) _UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(a__ , a__ ): values += (getattr(a__ , a__ ),) _UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: _UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: _UpperCAmelCase = warning + ' ' if standard_warn else '' warnings.warn(warning + message , a__ , stacklevel=a__ ) if isinstance(a__ , a__ ) and len(a__ ) > 0: _UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1] _UpperCAmelCase = call_frame.filename _UpperCAmelCase = call_frame.lineno _UpperCAmelCase = call_frame.function _UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(a__ ) == 0: return elif len(a__ ) == 1: return values[0] return values
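# Hedged usage sketch: the obfuscated helper above corresponds to diffusers'
# `deprecate`; the function and kwarg names in this example are illustrative.
def scale(x, **kwargs):
    # Pops `factor` from kwargs (emitting a deprecation warning) or returns None.
    factor = deprecate("factor", "1.0.0", "Pass `x` pre-scaled instead.", take_from=kwargs)
    return x * (factor if factor is not None else 1)

print(scale(2.0, factor=3.0))  # warns once, prints 6.0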
329
0
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case : int = logging.getLogger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" a :List[str] = git.Repo(search_parent_directories=UpperCAmelCase_ ) a :Tuple = { '''repo_id''': str(UpperCAmelCase_ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(UpperCAmelCase_ , '''git_log.json''' ) , '''w''' ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , indent=4 ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" if params.n_gpu <= 0: a :Dict = 0 a :Tuple = -1 a :Dict = True a :Optional[int] = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 a :Any = int(os.environ['''WORLD_SIZE'''] ) a :Union[str, Any] = int(os.environ['''N_GPU_NODE'''] ) a :Tuple = int(os.environ['''RANK'''] ) # number of nodes / node ID a :List[Any] = params.world_size // params.n_gpu_per_node a :str = params.global_rank // params.n_gpu_per_node a :Tuple = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 a :Any = 1 a :Any = 0 a :Tuple = 0 a :Any = 0 a :str = 1 a :str = 1 a :Any = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode a :Optional[int] = params.node_id == 0 and params.local_rank == 0 a :Optional[Any] = params.n_nodes > 1 # summary a :List[Any] = F'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def __lowerCamelCase ( UpperCAmelCase_ : List[Any] ): """simple docstring""" np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
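# Hedged sketch of driving the initializer above down its single-process path
# (the function and attribute names are the upstream ones; this dump obfuscates
# them). Requires a CUDA device, since the function asserts availability.
from types import SimpleNamespace

params = SimpleNamespace(n_gpu=1, local_rank=-1)
init_gpu_params(params)  # assumed upstream name of the GPU-init function above
assert params.is_master and not params.multi_gpu and params.world_size == 1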
94
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn from NUM_BALLS."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
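# Why the closed form above works (Project Euler 493): by linearity of
# expectation, the expected number of distinct colours among the drawn balls is
#     NUM_COLOURS * P(a given colour appears)
#   = 7 * (1 - C(60, 20) / C(70, 20)),
# since C(60, 20) / C(70, 20) is the probability that all 20 draws avoid the
# 10 balls of that colour.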
329
0
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """An address is valid when it has exactly four dot-separated octets in 0..254."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
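# Quick checks of the validator above; note it keeps the original upper bound
# of 254, so an octet of 255 is rejected:
assert is_ip_v4_address_valid("192.168.0.23")
assert not is_ip_v4_address_valid("192.255.15.8")  # octet above 254
assert not is_ip_v4_address_valid("not.an.ip.address")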
95
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ :str = { '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :Union[str, Any] = [ '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegatronBertForCausalLM''', '''MegatronBertForMaskedLM''', '''MegatronBertForMultipleChoice''', '''MegatronBertForNextSentencePrediction''', '''MegatronBertForPreTraining''', '''MegatronBertForQuestionAnswering''', '''MegatronBertForSequenceClassification''', '''MegatronBertForTokenClassification''', '''MegatronBertModel''', '''MegatronBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
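# Hedged sketch of what the lazy structure above provides: the heavy torch
# modeling module is only imported when a model symbol is first accessed.
from transformers import MegatronBertConfig

config = MegatronBertConfig()  # importing the config alone does not load the models
print(config.model_type)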
329
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""", # See all REALM models at https://huggingface.co/models?filter=realm } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """realm""" def __init__( self , lowercase=30522 , lowercase=768 , lowercase=128 , lowercase=12 , lowercase=12 , lowercase=8 , lowercase=3072 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=256 , lowercase=10 , lowercase=1E-3 , lowercase=5 , lowercase=320 , lowercase=13353718 , lowercase=5000 , lowercase=1 , lowercase=0 , lowercase=2 , **lowercase , ): super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) # Common config _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : int = max_position_embeddings _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Union[str, Any] = retriever_proj_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[str] = num_attention_heads _lowerCamelCase : Union[str, Any] = num_candidates _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : str = hidden_act _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Union[str, Any] = type_vocab_size _lowerCamelCase : str = layer_norm_eps # Reader config _lowerCamelCase : List[str] = span_hidden_size _lowerCamelCase : str = max_span_width _lowerCamelCase : Any = reader_layer_norm_eps _lowerCamelCase : List[Any] = reader_beam_size _lowerCamelCase : Any = reader_seq_len # Retrieval config _lowerCamelCase : Tuple = num_block_records _lowerCamelCase : str = searcher_beam_size
96
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was sucessfully converted!' 
) if __name__ == "__main__": lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase__ :List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
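# Hedged example invocation of the conversion script above; the paths and the
# config name are placeholders.
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5-v1_1-small-flax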
329
0
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=sys.maxsize ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = '''bilinear''' UpperCamelCase__ :Any = max_size UpperCamelCase__ :str = short_edge_length def __call__( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Dict = [] for img in imgs: UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = img.shape[:2] # later: provide list and randomly choose index for resize UpperCamelCase__ :int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCamelCase__ :Optional[int] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ ) if h < w: UpperCamelCase__ , UpperCamelCase__ :Any = size, scale * w else: UpperCamelCase__ , UpperCamelCase__ :Tuple = scale * h, size if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size: UpperCamelCase__ :Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase__ :Any = newh * scale UpperCamelCase__ :str = neww * scale UpperCamelCase__ :Union[str, Any] = int(neww + 0.5 ) UpperCamelCase__ :Any = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCamelCase__ :str = Image.fromarray(UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCamelCase__ :List[str] = np.asarray(UpperCamelCase_ ) else: UpperCamelCase__ :Optional[int] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCamelCase__ :str = nn.functional.interpolate( UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 ) img_augs.append(UpperCamelCase_ ) return img_augs class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :str = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCamelCase__ :Optional[int] = cfg.INPUT.FORMAT UpperCamelCase__ :Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCamelCase__ :Tuple = cfg.PAD_VALUE UpperCamelCase__ :Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST UpperCamelCase__ :List[str] = cfg.MODEL.DEVICE UpperCamelCase__ :str = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCamelCase__ :List[str] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCamelCase__ :List[Any] = lambda UpperCamelCase_ : (x - self.pixel_mean) / self.pixel_std def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :str = tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) ) UpperCamelCase__ :Optional[Any] = [im.shape[-2:] for im in images] UpperCamelCase__ :Dict = [ nn.functional.pad( UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) def __call__( self , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Union[str, Any] = [images] if single_image: assert len(UpperCamelCase_ ) == 1 for i in 
range(len(UpperCamelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCamelCase__ :Tuple = torch.tensor([im.shape[:2] for im in images] ) UpperCamelCase__ :Optional[Any] = self.aug(UpperCamelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCamelCase__ :Tuple = [self.normalizer(UpperCamelCase_ ) for x in images] # now pad them to do the following operations UpperCamelCase__ , UpperCamelCase__ :Dict = self.pad(UpperCamelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCamelCase__ :Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def a ( __a , __a ) -> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def a ( __a , __a ) -> str: '''simple docstring''' assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!" UpperCamelCase__ , UpperCamelCase__ :Tuple = box_size tensor[:, 0].clamp_(min=0 , max=__a ) tensor[:, 1].clamp_(min=0 , max=__a ) tensor[:, 2].clamp_(min=0 , max=__a ) tensor[:, 3].clamp_(min=0 , max=__a )
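# Hedged sketch of the resize helper above in isolation: the shortest edge is
# drawn from the given range (fixed here) and the aspect ratio is preserved.
import numpy as np

aug = ResizeShortestEdge([640, 640], 1333)     # short edge 640, long edge capped at 1333
img = np.zeros((480, 800, 3), dtype=np.uint8)
(resized,) = aug([img])
print(resized.shape)  # (640, 1067, 3): 480 -> 640, 800 scaled by the same factor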
97
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ :List[Any] = logging.get_logger(__name__) lowerCAmelCase__ :Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class __a ( UpperCAmelCase ): _a : str = 'ctrl' _a : Tuple = ['past_key_values'] _a : List[Any] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _SCREAMING_SNAKE_CASE=246534 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1280 , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=48 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" _UpperCAmelCase = vocab_size _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = dff _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache super().__init__(**_SCREAMING_SNAKE_CASE )
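# Hedged sketch: the attribute map declared above lets canonical names alias
# the CTRL-specific ones (class name per the upstream CTRLConfig).
config = CTRLConfig(n_layer=2, n_head=4)
print(config.num_hidden_layers, config.hidden_size)  # 2 1280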
329
0
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" snake_case__ = StableDiffusionDiffEditPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} snake_case__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case__ = frozenset([] ) def __lowerCAmelCase ( self : int ): torch.manual_seed(0 ) UpperCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowerCamelCase__ ,) UpperCAmelCase__ = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,) UpperCAmelCase__ = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCamelCase__ ,set_alpha_to_zero=lowerCamelCase__ ,) torch.manual_seed(0 ) UpperCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) UpperCAmelCase__ = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='gelu' ,projection_dim=512 ,) UpperCAmelCase__ = CLIPTextModel(lowerCamelCase__ ) UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCAmelCase__ = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any]=0 ): UpperCAmelCase__ = floats_tensor((1, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) UpperCAmelCase__ = floats_tensor((1, 2, 4, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ ) else: UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCAmelCase__ = { 'prompt': 'a dog and a newt', 'mask_image': mask, 
'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str=0 ): UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0] UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ) if str(lowerCamelCase__ ).startswith('mps' ): UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ ) else: UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCAmelCase__ = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : Any=0 ): UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0] UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ) if str(lowerCamelCase__ ).startswith('mps' ): UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ ) else: UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCAmelCase__ = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def __lowerCAmelCase ( self : int ): if not hasattr(self.pipeline_class ,'_optional_components' ): return UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ ) UpperCAmelCase__ = pipe(**lowerCamelCase__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = self.pipeline_class.from_pretrained(lowerCamelCase__ ) pipe_loaded.to(lowerCamelCase__ ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase__ ,lowerCamelCase__ ) is None ,f'''`{optional_component}` did not stay set to None after loading.''' ,) UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ ) UpperCAmelCase__ = pipe_loaded(**lowerCamelCase__ )[0] UpperCAmelCase__ = np.abs(output - output_loaded ).max() self.assertLess(lowerCamelCase__ ,1e-4 ) def __lowerCAmelCase ( self : Union[str, Any] ): UpperCAmelCase__ = 'cpu' UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = self.get_dummy_mask_inputs(lowerCamelCase__ ) UpperCAmelCase__ = pipe.generate_mask(**lowerCamelCase__ ) UpperCAmelCase__ = mask[0, -3:, -3:] 
self.assertEqual(mask.shape ,(1, 16, 16) ) UpperCAmelCase__ = np.array([0] * 9 ) UpperCAmelCase__ = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ ,1e-3 ) self.assertEqual(mask[0, -3, -4] ,0 ) def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = 'cpu' UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = self.get_dummy_inversion_inputs(lowerCamelCase__ ) UpperCAmelCase__ = pipe.invert(**lowerCamelCase__ ).images UpperCAmelCase__ = image[0, -1, -3:, -3:] self.assertEqual(image.shape ,(2, 32, 32, 3) ) UpperCAmelCase__ = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] ,) UpperCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ ,1e-3 ) def __lowerCAmelCase ( self : Any ): super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = 'cpu' UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = {'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'beta_schedule': 'scaled_linear'} UpperCAmelCase__ = DPMSolverMultistepScheduler(**lowerCamelCase__ ) UpperCAmelCase__ = DPMSolverMultistepInverseScheduler(**lowerCamelCase__ ) UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = self.get_dummy_inversion_inputs(lowerCamelCase__ ) UpperCAmelCase__ = pipe.invert(**lowerCamelCase__ ).images UpperCAmelCase__ = image[0, -1, -3:, -3:] self.assertEqual(image.shape ,(2, 32, 32, 3) ) UpperCAmelCase__ = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] ,) UpperCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ ,1e-3 ) @require_torch_gpu @slow class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __lowerCAmelCase ( cls : int ): UpperCAmelCase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) UpperCAmelCase__ = raw_image.convert('RGB' ).resize((768, 768) ) UpperCAmelCase__ = raw_image def __lowerCAmelCase ( self : int ): UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa ) UpperCAmelCase__ = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = 'a bowl of fruit' UpperCAmelCase__ = 'a bowl of pears' UpperCAmelCase__ = pipe.generate_mask( image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,) UpperCAmelCase__ = pipe.invert( prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ).latents UpperCAmelCase__ = pipe( prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 
,output_type='numpy' ,).images[0] UpperCAmelCase__ = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa ) UpperCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCAmelCase__ = 'a bowl of fruit' UpperCAmelCase__ = 'a bowl of pears' UpperCAmelCase__ = pipe.generate_mask( image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,) UpperCAmelCase__ = pipe.invert( prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ,num_inference_steps=25 ,).latents UpperCAmelCase__ = pipe( prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 ,num_inference_steps=25 ,output_type='numpy' ,).images[0] UpperCAmelCase__ = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
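# Hedged sketch of the three-stage DiffEdit flow the slow tests above exercise:
# (1) generate a mask from source/target prompts, (2) invert the image to
# latents, (3) denoise the masked latents with the target prompt. Mirrors the
# test setup; treat it as a sketch, not a verified script.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((768, 768))

mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit",
                          target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
image = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
             inpaint_strength=0.7).images[0]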
98
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
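# Hedged usage sketch mirroring the detection path tested above; the image is
# the repository test fixture and the shape is the one the tests assert on.
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeformableDetrImageProcessor()
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) for this fixture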
329
0
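The slow test above checks that COCO boxes come back from the processor in normalized (center_x, center_y, width, height) form. As a hedged illustration (not the library's own code; the helper name and numbers below are made up), the conversion from a COCO `[x, y, w, h]` pixel box looks like:

```python
def coco_to_center_format(box, img_w, img_h):
    # [x, y, w, h] in pixels -> [cx, cy, w, h] as fractions of the image size
    x, y, w, h = box
    return [(x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h]

print(coco_to_center_format([10, 20, 30, 40], 640, 480))
# [0.0390625, 0.0833..., 0.046875, 0.0833...]
```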
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : Dict = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Union[str, Any] = '''t5''' __A : Dict = ['''past_key_values'''] __A : int = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowercase=3_2128 , lowercase=512 , lowercase=64 , lowercase=2048 , lowercase=6 , lowercase=None , lowercase=8 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1e-6 , lowercase=1.0 , lowercase="relu" , lowercase=True , lowercase=True , lowercase=0 , lowercase=1 , **lowercase , ) -> Optional[int]: '''simple docstring''' a__ : List[Any] = vocab_size a__ : str = d_model a__ : str = d_kv a__ : Any = d_ff a__ : Optional[Any] = num_layers a__ : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a__ : Any = num_heads a__ : Any = relative_attention_num_buckets a__ : List[str] = relative_attention_max_distance a__ : str = dropout_rate a__ : str = layer_norm_epsilon a__ : Union[str, Any] = initializer_factor a__ : List[Any] = feed_forward_proj a__ : str = use_cache a__ : Optional[int] = self.feed_forward_proj.split('-') a__ : Optional[int] = act_info[-1] a__ : Tuple = act_info[0] == 'gated' if len(lowercase) > 1 and act_info[0] != "gated" or len(lowercase) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'') # for backwards compatibility if feed_forward_proj == "gated-gelu": a__ : int = 'gelu_new' super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , **lowercase , ) class A__ ( __UpperCAmelCase ): """simple docstring""" @property def __lowercase ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' a__ : str = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: a__ : int = 'past_encoder_sequence + sequence' a__ : int = {0: 'batch'} a__ : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a__ : Any = {0: 'batch', 1: 'decoder_sequence'} a__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='inputs') return common_inputs @property def __lowercase ( self) -> int: '''simple docstring''' return 13
99
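The `feed_forward_proj` handling in the T5 config above splits the string on `-` and treats a leading `gated` as a flag on the activation. A minimal re-derivation of that parsing logic, with illustrative names:

```python
def parse_feed_forward_proj(value: str):
    # "gated-gelu" -> (True, "gelu"); "relu" -> (False, "relu")
    parts = value.split("-")
    act = parts[-1]
    is_gated = parts[0] == "gated"
    if (len(parts) > 1 and not is_gated) or len(parts) > 2:
        raise ValueError(f"{value!r} is not a valid feed_forward_proj")
    return is_gated, act

assert parse_feed_forward_proj("gated-gelu") == (True, "gelu")
assert parse_feed_forward_proj("relu") == (False, "relu")
```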
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __a ( unittest.TestCase ): _a : List[str] = JukeboxTokenizer _a : List[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 
76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) _UpperCAmelCase = tokenizer(**self.metas )['input_ids'] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 
67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
329
0
"""simple docstring""" from PIL import Image def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): def brightness(UpperCamelCase_ ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(UpperCamelCase_ ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 __magic_name__ = change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
100
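For the brightness snippet above, the mapping `128 + level + (c - 128)` reduces to `c + level`; an 8-bit image cannot store values outside [0, 255], so out-of-range results are brought back into range. A quick worked check at `level = 100` (clamping made explicit here):

```python
level = 100
for c in (0, 50, 128, 255):
    shifted = 128 + level + (c - 128)          # same as c + level
    print(c, "->", max(0, min(255, shifted)))  # 0->100, 50->150, 128->228, 255->255
```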
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__) def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) _UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]: '''simple docstring''' def fn(a__: str ): return tokenizer(examples['text'] ) return fn def lowerCAmelCase__ ( a__: List[str] ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=a__ ) _UpperCAmelCase = tf.train.Example(features=a__ ) _UpperCAmelCase = example.SerializeToString() records.append(a__ ) return records def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int: '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(a__ ) , args.limit ) _UpperCAmelCase = dataset.select(range(a__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(a__ ): os.makedirs(a__ ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_UpperCAmelCase = tokenize_function(a__ ) _UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a__: Optional[int] ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(a__ ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _UpperCAmelCase = get_serialized_examples(a__ ) with tf.io.TFRecordWriter(a__ ) as out_file: for i in range(len(a__ ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(a__ ) print('Wrote file {} containing {} records'.format(a__ , a__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(F'''Total {args.split} records: {total_records}''' , file=a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = parse_args() main(args)
329
0
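The `group_texts` comment above describes concatenating all token lists into one stream and cutting it into `max_length` blocks, dropping the remainder. A stripped-down sketch of that idea (illustrative names, not the script's own helper):

```python
def chunk(stream, block_size):
    # Keep only full blocks; the tail shorter than block_size is discarded.
    total = (len(stream) // block_size) * block_size
    return [stream[i : i + block_size] for i in range(0, total, block_size)]

tokens = sum([[1, 2, 3], [4, 5], [6, 7, 8, 9]], [])  # concatenate -> [1..9]
print(chunk(tokens, 4))  # [[1, 2, 3, 4], [5, 6, 7, 8]]; the trailing [9] is dropped
```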
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( SCREAMING_SNAKE_CASE__ ): def __init__( self ,A__ ,A__ ,A__ = None ,A__ = None ,A__ = False ,**A__ ,): super().__init__(features=A__ ,cache_dir=A__ ,keep_in_memory=A__ ,**A__) lowercase = Sql( cache_dir=A__ ,features=A__ ,sql=A__ ,con=A__ ,**A__ ,) def A__ ( self): lowercase = None lowercase = None lowercase = None lowercase = None self.builder.download_and_prepare( download_config=A__ ,download_mode=A__ ,verification_mode=A__ ,base_path=A__ ,) # Build dataset for splits lowercase = self.builder.as_dataset( split='''train''' ,verification_mode=A__ ,in_memory=self.keep_in_memory) return dataset class lowercase : def __init__( self ,A__ ,A__ ,A__ ,A__ = None ,A__ = None ,**A__ ,): if num_proc is not None and num_proc <= 0: raise ValueError(f'num_proc {num_proc} must be an integer > 0.') lowercase = dataset lowercase = name lowercase = con lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE lowercase = num_proc lowercase = to_sql_kwargs def A__ ( self): lowercase = self.to_sql_kwargs.pop('''sql''' ,A__) lowercase = self.to_sql_kwargs.pop('''con''' ,A__) lowercase = self.to_sql_kwargs.pop('''index''' ,A__) lowercase = self._write(index=A__ ,**self.to_sql_kwargs) return written def A__ ( self ,A__): lowercase , lowercase , lowercase = args lowercase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs lowercase = query_table( table=self.dataset.data ,key=slice(A__ ,offset + self.batch_size) ,indices=self.dataset._indices ,) lowercase = batch.to_pandas() lowercase = df.to_sql(self.name ,self.con ,index=A__ ,**A__) return num_rows or len(A__) def A__ ( self ,A__ ,**A__): lowercase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset) ,self.batch_size) ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,): written += self._batch_sql((offset, index, to_sql_kwargs)) else: lowercase , lowercase = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,A__ ,A__)] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,): written += num_rows return written
101
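The `_write` loop in the SQL writer above walks the dataset in `batch_size` slices, creating the table on the first slice and appending each later one (`if_exists='append'` when `offset > 0`). A minimal sketch of the offset arithmetic, with a hypothetical helper name:

```python
def iter_batches(n_rows, batch_size):
    # Yield (start, end) row ranges covering the dataset in order.
    for offset in range(0, n_rows, batch_size):
        yield offset, min(offset + batch_size, n_rows)

print(list(iter_batches(10, 4)))  # [(0, 4), (4, 8), (8, 10)]
```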
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
329
0
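The expected values for `get_linear_schedule_with_warmup` in the test above (warmup 2, total 10, base lr 10.0) can be re-derived by hand. A sketch of the underlying step-to-multiplier lambda, which reproduces exactly those numbers:

```python
def linear_lambda(step, warmup=2, total=10):
    # Linear ramp up over `warmup` steps, then linear decay to 0 at `total`.
    if step < warmup:
        return step / warmup
    return max(0.0, (total - step) / (total - warmup))

print([round(10.0 * linear_lambda(s), 2) for s in range(10)])
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]
```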
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return 1 def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_snake_case ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_snake_case ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_snake_case ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_snake_case ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(_snake_case ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else two_pound(x - 200 ) + one_pound(_snake_case ) def lowercase ( _snake_case : int = 200 ) ->int: """simple docstring""" return two_pound(_snake_case ) if __name__ == "__main__": print(solution(int(input().strip())))
102
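The recursion above counts UK coin combinations one denomination at a time. An equivalent bottom-up formulation (a sketch, not part of the original file) is much faster and gives the same answer for 200 pence:

```python
def count_ways(target=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    # ways[a] = number of combinations making `a` pence from the coins seen so far
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

print(count_ways(200))  # 73682, matching the recursive solution
```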
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ :Any = logging.get_logger(__name__) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = original_name.split('.' )[0] _UpperCAmelCase = key.split('.' ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] ) _UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] ) _UpperCAmelCase = orig_block_num - offset _UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def lowerCAmelCase__ ( a__: Tuple ) -> int: '''simple docstring''' _UpperCAmelCase = OrderedDict() _UpperCAmelCase , _UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): _UpperCAmelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 _UpperCAmelCase = key[: key.find('proj' )] _UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' ) _UpperCAmelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: _UpperCAmelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' ) if "norm2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: _UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: _UpperCAmelCase = key.replace('head' , 'classifier' ) _UpperCAmelCase = value return new_state_dict def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict: '''simple docstring''' _UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = model_name[-3:] _UpperCAmelCase = 1_0_0_0 _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = (1, 1_0_0_0) # set config attributes _UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": _UpperCAmelCase = [2, 2, 6, 2] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 0.9 elif size == "s24": _UpperCAmelCase = [4, 4, 1_2, 4] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 
_UpperCAmelCase = 0.9 elif size == "s36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.9 elif size == "m36": _UpperCAmelCase = [6, 6, 1_8, 6] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 elif size == "m48": _UpperCAmelCase = [8, 8, 2_4, 8] _UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] _UpperCAmelCase = 4.0 _UpperCAmelCase = 1e-6 _UpperCAmelCase = 0.95 else: raise ValueError(F'''Size {size} not supported''' ) # load image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict _UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) ) # rename keys _UpperCAmelCase = rename_keys(a__ ) # create HuggingFace model and load state dict _UpperCAmelCase = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor _UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ ) _UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass _UpperCAmelCase = model(a__ ) _UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": _UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": _UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": _UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": _UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": _UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCAmelCase__ :str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCAmelCase__ :Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
0
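`replace_key_with_offset` in the conversion script above reads the block and layer indices two and one positions before the matched sub-name, shifts the block index down by `offset`, and prefixes `block.`. A hedged miniature of that logic (hypothetical helper, single key):

```python
def rename(key, old, new, offset):
    parts = key.split(".")
    idx = parts.index(old.split(".")[0])           # position of e.g. "mlp"
    block, layer = int(parts[idx - 2]), int(parts[idx - 1])
    return key.replace(f"{block}.{layer}.{old}", f"block.{block - offset}.{layer}.{new}")

print(rename("poolformer.encoder.2.0.mlp.fc1.weight", "mlp.fc1", "output.conv1", 1))
# poolformer.encoder.block.1.0.output.conv1.weight
```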
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
103
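The `while` loop in `solution` above is the number-spiral corner walk of Project Euler 58: with `j` the side length of the last completed ring, `range(j*j + j + 1, (j+2)*(j+2), j+1)` yields the three non-square corners of the next ring (the fourth corner is the perfect square `(j+2)**2`, never prime), and `2*j - 1` is the diagonal count so far. A quick check of that enumeration:

```python
j = 3
print(list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1)))  # [13, 17, 21]
j = 5
print(list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1)))  # [31, 37, 43]
```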
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params ) return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self ) -> Any: """simple docstring""" return len(self.loader ) def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE ) self._loader_batch_index += 1 return result def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __iter__( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def UpperCAmelCase__ ( self ) -> int: """simple docstring""" if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class __a ( UpperCAmelCase ): def __iter__( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = iter(self.loader ) return self def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop('is_last' ) accumulator.append(_SCREAMING_SNAKE_CASE ) return accumulator class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.dataset[i][self.key] class __a ( UpperCAmelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.dataset ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
329
0
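The `loader_batch_item` logic above re-exposes a batched model output one element at a time, each keeping a leading batch dimension of 1 for compatibility with downstream code. A toy illustration of that unbatching:

```python
import torch

batch = torch.arange(6).reshape(3, 2)  # pretend model output for a batch of 3
items = [batch[i].unsqueeze(0) for i in range(batch.shape[0])]
print([t.shape for t in items])        # [torch.Size([1, 2])] * 3
```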
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
104
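A quick sanity check of the randomized quicksort above (this assumes the `partition` / `quick_sort_random` definitions from that snippet are in scope; the right bound is exclusive, as in `main`):

```python
import random

random.seed(0)  # makes the pivot choices reproducible; the result is sorted either way
data = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(data, 0, len(data))
print(data)  # [1, 1, 2, 3, 4, 5, 6, 9]
```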
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ :int = logging.get_logger(__name__) lowerCAmelCase__ :Optional[Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class __a ( UpperCAmelCase ): _a : str = 'data2vec-text' def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class __a ( UpperCAmelCase ): @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
329
0
"""simple docstring""" import math import sys def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->int: '''simple docstring''' if number != int(_lowercase ): raise ValueError("the value of input must be a natural number" ) if number < 0: raise ValueError("the value of input must not be a negative number" ) if number == 0: return 1 a : Any = [-1] * (number + 1) a : List[str] = 0 for i in range(1 , number + 1 ): a : List[str] = sys.maxsize a : List[Any] = int(math.sqrt(_lowercase ) ) for j in range(1 , root + 1 ): a : str = 1 + answers[i - (j**2)] a : Any = min(_lowercase , _lowercase ) a : Dict = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
105
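The DP above fills `answers[i] = 1 + min(answers[i - j*j])` over all squares `j*j <= i`. A compact, self-contained sketch of the same recurrence (illustrative name; `math.isqrt` avoids the float square root):

```python
import math

def min_squares(n):
    best = [0] * (n + 1)
    for i in range(1, n + 1):
        best[i] = 1 + min(best[i - k * k] for k in range(1, math.isqrt(i) + 1))
    return best[n]

print(min_squares(12))  # 3, since 12 = 4 + 4 + 4
```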
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ , A_ ): while second != 0: lowerCAmelCase__ : Tuple = first & second first ^= second lowerCAmelCase__ : Union[str, Any] = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Optional[Any] = int(input('''Enter the first number: ''').strip()) __UpperCamelCase : List[Any] = int(input('''Enter the second number: ''').strip()) print(F'''{add(first, second) = }''')
106
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
329
0
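# Editor's note: a minimal self-contained check of the carry-free adder in the row
# above (hypothetical helper name; not part of the dataset rows). XOR is the sum
# without carries, AND<<1 is the carry, and the loop reruns until no carry is left.
def _bitwise_add(first: int, second: int) -> int:
    while second != 0:
        first, second = first ^ second, (first & second) << 1
    return first


assert _bitwise_add(5, 3) == 8  # 0b101 + 0b011: the carries propagate in three rounds
assert _bitwise_add(0, 7) == 7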
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve the linear system Ax = b iteratively with the Jacobi method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless every diagonal entry dominates its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
107
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 30} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize_and_center_crop _UpperCAmelCase = size _UpperCAmelCase = crop_pct _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> int: """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
329
0
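# Editor's note: a small self-contained sanity check of the Jacobi scheme used in the
# row above (values are hypothetical). Each sweep solves every row for its diagonal
# unknown using only the previous iterate, so the update can be written in one line.
import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])  # strictly diagonally dominant
b = np.array([5.0, 9.0, 1.0])
x = np.zeros(3)
for _ in range(50):
    # x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii, all rows updated from the old x
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)
print(x)  # converges toward the exact solution [1.0, 2.0, -1.0]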
"""simple docstring""" import re def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase : Tuple = re.compile( r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" ) return bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": lowerCAmelCase__ = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
108
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : List[str] = DonutImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @is_flaky() def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
329
0
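# Editor's note: illustrative matches for the Sri Lankan phone-number pattern in the
# row above (inputs are hypothetical, not part of the dataset rows).
import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
assert pattern.search("0094702343221")  # 0094 + operator 70 + 7-digit subscriber
assert pattern.search("+94773283048")  # +94 + operator 77 + 7-digit subscriber
assert not pattern.search("0793283048")  # 79 is not an accepted operator prefix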
"""simple docstring""" import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _snake_case ( UpperCamelCase : int , UpperCamelCase : Tuple ): assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Any ): UpperCAmelCase : List[Any] = tmp_path / """cache""" UpperCAmelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : List[Any] = SqlDatasetReader( """dataset""" , """sqlite:///""" + sqlite_path , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_sql_dataset(UpperCamelCase , UpperCamelCase ) @require_sqlalchemy @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def _snake_case ( UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict ): UpperCAmelCase : Optional[Any] = tmp_path / """cache""" UpperCAmelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase : int = features.copy() if features else default_expected_features UpperCAmelCase : str = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : List[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_sql_dataset(UpperCamelCase , UpperCamelCase ) def _snake_case ( UpperCamelCase : List[Any] ): with contextlib.closing(sqlitea.connect(UpperCamelCase ) ) as con: UpperCAmelCase : int = con.cursor() cur.execute("""SELECT * FROM dataset""" ) for row in cur: yield row @require_sqlalchemy def _snake_case ( UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Dict ): UpperCAmelCase : Optional[int] = tmp_path / """cache""" UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase , """tmp.sql""" ) UpperCAmelCase : int = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=UpperCamelCase ).read() SqlDatasetWriter(UpperCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write() UpperCAmelCase : List[Any] = iter_sql_file(UpperCamelCase ) UpperCAmelCase : List[str] = iter_sql_file(UpperCamelCase ) for rowa, rowa in zip(UpperCamelCase , UpperCamelCase ): assert rowa == rowa @require_sqlalchemy def _snake_case ( UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Any ): UpperCAmelCase : int = tmp_path / """cache""" UpperCAmelCase : Dict = 
os.path.join(UpperCamelCase , """tmp.sql""" ) UpperCAmelCase : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=UpperCamelCase ).read() SqlDatasetWriter(UpperCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write() UpperCAmelCase : str = iter_sql_file(UpperCamelCase ) UpperCAmelCase : List[str] = iter_sql_file(UpperCamelCase ) for rowa, rowa in zip(UpperCamelCase , UpperCamelCase ): assert rowa == rowa @require_sqlalchemy def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): UpperCAmelCase : List[str] = tmp_path / """cache""" UpperCAmelCase : List[str] = os.path.join(UpperCamelCase , """tmp.sql""" ) UpperCAmelCase : Union[str, Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=UpperCamelCase ).read() with pytest.raises(UpperCamelCase ): SqlDatasetWriter(UpperCamelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
109
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
329
0
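# Editor's note: a hedged sketch of the SQL round trip the tests above exercise,
# via the public `Dataset.to_sql` / `Dataset.from_sql` wrappers (requires
# sqlalchemy; the table and file names here are hypothetical).
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_sql("dataset", "sqlite:///tmp_example.db")
round_tripped = Dataset.from_sql("dataset", "sqlite:///tmp_example.db")
assert round_tripped.column_names == ["col_1", "col_2", "col_3"]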
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def a( A : List[str] ) -> Any: """simple docstring""" if is_torch_version("<" , "2.0.0" ) or not hasattr(a__ , "_dynamo" ): return False return isinstance(a__ , torch._dynamo.eval_frame.OptimizedModule ) def a( A : Tuple , A : bool = True ) -> List[Any]: """simple docstring""" a = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) a = is_compiled_module(a__ ) if is_compiled: a = model a = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(a__ , a__ ): a = model.module if not keep_fpaa_wrapper: a = getattr(a__ , "forward" ) a = model.__dict__.pop("_original_forward" , a__ ) if original_forward is not None: while hasattr(a__ , "__wrapped__" ): a = forward.__wrapped__ if forward == original_forward: break a = forward if getattr(a__ , "_converted_to_transformer_engine" , a__ ): convert_model(a__ , to_transformer_engine=a__ ) if is_compiled: a = model a = compiled_model return model def a( ) -> str: """simple docstring""" PartialState().wait_for_everyone() def a( A : Any , A : Optional[Any] ) -> Tuple: """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(a__ , a__ ) elif PartialState().local_process_index == 0: torch.save(a__ , a__ ) @contextmanager def a( **A : Union[str, Any] ) -> List[str]: """simple docstring""" for key, value in kwargs.items(): a = str(a__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def a( A : str ) -> Tuple: """simple docstring""" if not hasattr(a__ , "__qualname__" ) and not hasattr(a__ , "__name__" ): a = getattr(a__ , "__class__" , a__ ) if hasattr(a__ , "__qualname__" ): return obj.__qualname__ if hasattr(a__ , "__name__" ): return obj.__name__ return str(a__ ) def a( A : Optional[Any] , A : Union[str, Any] ) -> Optional[int]: """simple docstring""" for key, value in source.items(): if isinstance(a__ , a__ ): a = destination.setdefault(a__ , {} ) merge_dicts(a__ , a__ ) else: a = value return destination def a( A : int = None ) -> bool: """simple docstring""" if port is None: a = 2_9500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
227
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
329
0
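# Editor's note: the URL shape asserted by the hf_hub_url test just above, spelled
# out with concrete (hypothetical) values.
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(url)  # https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv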
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to `max_images` Google Images results for `query`; return the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
324
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
329
0
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
280
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search returning the set of reachable vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
329
0
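# Editor's note: an O(1)-space alternative to the visited-list loop check above:
# Floyd's tortoise-and-hare cycle detection. A sketch over the same Node shape,
# assuming each node exposes a `next_node` attribute.
def has_loop(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False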
"""simple docstring"""


def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the gravity/bead-abacus idea."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the surplus beads "fall" from the upper rod to the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
208
import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )] _UpperCAmelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('.bin' ) for f in files ) @slow @require_flax class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 4 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3 assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_SCREAMING_SNAKE_CASE ) == num_samples def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa ) _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = scheduler.create_state() _UpperCAmelCase = scheduler_state _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 
40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.random.PRNGKey(0 ) _UpperCAmelCase = 50 _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) # shard inputs and rng _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3 assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1 def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = ( 'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of' ' field, close up, split lighting, cinematic' ) _UpperCAmelCase = jax.device_count() _UpperCAmelCase = num_samples * [prompt] _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # With memory efficient attention _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _UpperCAmelCase = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
329
0
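# Editor's note: quick demonstration of the input guard in `bead_sort` above
# (hypothetical usage; assumes the function from that snippet is in scope).
# The sort itself makes len(sequence) full passes of adjacent bead transfers.
try:
    bead_sort([3, -1, 2])
except TypeError as err:
    print(err)  # Sequence must be list of non-negative integers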
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class __A: def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=4 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.1 , _snake_case=True , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ) -> Union[str, Any]: '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_multiple_size __a = hidden_act __a = hidden_dropout __a = attention_dropout __a = weight_tying __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_labels __a = num_choices __a = scope def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) __a = None if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a , __a , __a , __a = self.prepare_config_and_inputs() __a = True return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = GPTNeoXJapaneseModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __a = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __a = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = True __a = GPTNeoXJapaneseModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __a = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> Tuple: '''simple docstring''' __a = GPTNeoXJapaneseForCausalLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __a = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Dict: '''simple docstring''' __a = True __a = GPTNeoXJapaneseForCausalLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() # first forward pass __a = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) __a = output_from_no_past['''hidden_states'''][0] __a = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a , __a = config_and_inputs __a = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __A( a , a , unittest.TestCase ): snake_case_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () snake_case_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () snake_case_ = ( {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = GPTNeoXJapaneseModelTester(self ) __a = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = '''abeja/gpt-neox-japanese-2.7b''' __a = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、'''] __a = [ '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''', '''100年後に必要とされる会社は、「人」が中心の会社です。''', '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''', '''国境の長いトンネルを抜けると、そこは雪国だった。''', '''美味しい日本食といえば、やっぱりお寿司ですよね。''', ] __a = GPTNeoXJapaneseTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a = GPTNeoXJapaneseForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE ) __a = [] for prompt in prompts: __a = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __a = model.generate(_SCREAMING_SNAKE_CASE , max_length=50 ) __a = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) predicted_outputs += generated_string self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
6
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCAmelCase__ :int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase ) class __a ( UpperCAmelCase ): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = {} if prompt is not None: _UpperCAmelCase = prompt if generate_kwargs is not None: _UpperCAmelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _UpperCAmelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one' ) _UpperCAmelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: """simple docstring""" _UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. ''' 'Note also that one single text can be provided for conditional image to text generation.' 
) _UpperCAmelCase = self.model.config.model_type if model_type == "git": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids _UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids _UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({'input_ids': input_ids} ) elif model_type == "pix2struct": _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) _UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: _UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: _UpperCAmelCase = None return model_inputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs['input_ids'] ) ): _UpperCAmelCase = None if generate_kwargs is None: _UpperCAmelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _UpperCAmelCase = model_inputs.pop(self.model.main_input_name ) _UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = [] for output_ids in model_outputs: _UpperCAmelCase = { 'generated_text': self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , ) } records.append(_SCREAMING_SNAKE_CASE ) return records
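As a hedged usage sketch, the class above backs the "image-to-text" pipeline task in transformers; the checkpoint name and image URL below are illustrative, not taken from this file:

# Illustrative invocation of the image-to-text pipeline defined above.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # assumed checkpoint
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': '...'}]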
329
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _snake_case = pytest.mark.integration _snake_case = {'''comet'''} _snake_case = importlib.util.find_spec("fairseq") is not None _snake_case = {'''code_eval'''} _snake_case = os.name == '''nt''' _snake_case = {'''bertscore''', '''frugalscore''', '''perplexity'''} _snake_case = importlib.util.find_spec("transformers") is not None def A ( _lowerCamelCase ): '''simple docstring''' @wraps(a__ ) def wrapper(self , _lowerCamelCase ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , a__ ) return wrapper def A ( _lowerCamelCase ): '''simple docstring''' @wraps(a__ ) def wrapper(self , _lowerCamelCase ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , a__ ) return wrapper def A ( _lowerCamelCase ): '''simple docstring''' @wraps(a__ ) def wrapper(self , _lowerCamelCase ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , a__ ) return wrapper def A ( ): '''simple docstring''' _lowerCAmelCase : str = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names()) @for_all_test_methods( a , a , a) @local class UpperCAmelCase_ ( parameterized.TestCase): lowerCamelCase__ = {} lowerCamelCase__ = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning") @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning") def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[int] = "[...]" _lowerCAmelCase : List[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", _SCREAMING_SNAKE_CASE)).module_path) _lowerCAmelCase : Any = datasets.load.import_main_class(metric_module.__name__, dataset=_SCREAMING_SNAKE_CASE) # check parameters _lowerCAmelCase : Union[str, Any] = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(_SCREAMING_SNAKE_CASE, metric_module.__name__): with self.use_local_metrics(): try: _lowerCAmelCase : Dict = doctest.testmod(_SCREAMING_SNAKE_CASE, verbose=_SCREAMING_SNAKE_CASE, raise_on_error=_SCREAMING_SNAKE_CASE) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0) self.assertGreater(results.attempted, 1) @slow def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : int = "[...]" _lowerCAmelCase : Union[str, Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", _SCREAMING_SNAKE_CASE)).module_path) # run doctest with self.use_local_metrics(): _lowerCAmelCase : Union[str, Any] = doctest.testmod(_SCREAMING_SNAKE_CASE, verbose=_SCREAMING_SNAKE_CASE, raise_on_error=_SCREAMING_SNAKE_CASE) self.assertEqual(results.failed, 0) 
self.assertGreater(results.attempted, 1) @contextmanager def snake_case__ ( self, __a, __a): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](_SCREAMING_SNAKE_CASE): yield else: yield @contextmanager def snake_case__ ( self): '''simple docstring''' def load_local_metric(__a, *__a, **__a): return load_metric(os.path.join("metrics", _SCREAMING_SNAKE_CASE), *_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE) with patch("datasets.load_metric") as mock_load_metric: _lowerCAmelCase : int = load_local_metric yield @classmethod def snake_case__ ( cls, __a): '''simple docstring''' def wrapper(__a): _lowerCAmelCase : Optional[Any] = contextmanager(_SCREAMING_SNAKE_CASE) _lowerCAmelCase : Optional[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class UpperCAmelCase_ ( a): def snake_case__ ( self, __a): '''simple docstring''' assert len(input_dict["input_ids"]) == 2 return np.array([1.03, 1.04]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: _lowerCAmelCase : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def A ( _lowerCamelCase ): '''simple docstring''' import torch def bert_cos_score_idf(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ): return torch.tensor([[1.0, 1.0, 1.0]] * len(a__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: _lowerCAmelCase : Dict = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def A ( _lowerCamelCase ): '''simple docstring''' def load_from_checkpoint(_lowerCamelCase ): class UpperCAmelCase_ : def snake_case__ ( self, __a, *__a, **__a): '''simple docstring''' assert len(_SCREAMING_SNAKE_CASE) == 2 _lowerCAmelCase : List[Any] = [0.19, 0.92] return scores, sum(_SCREAMING_SNAKE_CASE) / len(_SCREAMING_SNAKE_CASE) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: _lowerCAmelCase : Union[str, Any] = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: _lowerCAmelCase : List[Any] = load_from_checkpoint yield def A ( ): '''simple docstring''' _lowerCAmelCase : Optional[int] = load_metric(os.path.join("metrics" , "seqeval" ) ) _lowerCAmelCase : List[Any] = "ERROR" _lowerCAmelCase : int = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(a__ , match=re.escape(a__ ) ): metric.compute(predictions=[] , references=[] , scheme=a__ )
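The register_intensive_calls_patcher classmethod above stores one context manager per metric name so that expensive model calls can be mocked during doctests. A hypothetical registration for a new metric would mirror the bleurt/bertscore/comet examples above (the patch target and fake score here are placeholders):

# Hypothetical patcher for a metric named "my_metric"; the patched attribute
# ("my_metric_backend.forward") and the fake score are placeholders.
@LocalMetricTest.register_intensive_calls_patcher("my_metric")
def _patch_my_metric(module_name):
    with patch("my_metric_backend.forward") as mock_forward:
        mock_forward.return_value = [0.5]
        yield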
36
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    # Restored identifiers: this sample is the diffusers deprecation helper with
    # all names collapsed by the corpus obfuscation; logic is unchanged.
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
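An illustrative call of the helper above, assuming it is exposed under the upstream name `deprecate` as in the restored version: a single (attribute, version, message) tuple plus take_from=kwargs warns about a renamed keyword argument, pops it, and returns its value.

# Illustrative: recover a deprecated `size` kwarg while warning the caller.
def resize(image, target_size=None, **kwargs):
    if "size" in kwargs:
        # Emits a FutureWarning and pops `size` out of kwargs.
        target_size = deprecate(("size", "1.0.0", "Use `target_size` instead."), take_from=kwargs)
    return image, target_size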
329
0
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression with two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b, num_a)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
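A short worked trace of the two-stack evaluation on another fully parenthesised expression:

# Worked example: dijkstras_two_stack_algorithm("(3 + (2 * 4))")
#   '3' -> operand stack [3]                       (RULE 1)
#   '+' -> operator stack ['+']                    (RULE 2)
#   '2' -> operand stack [3, 2]
#   '*' -> operator stack ['+', '*']
#   '4' -> operand stack [3, 2, 4]
#   ')' -> pop '*', 4, 2 -> push 8   => [3, 8]     (RULE 4)
#   ')' -> pop '+', 8, 3 -> push 11  => [11]
assert dijkstras_two_stack_algorithm("(3 + (2 * 4))") == 11  # RULE 5: result is the top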
267
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from
    70 balls (10 of each of 7 colours), by linearity of expectation."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
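The formula above is linearity of expectation: each of the 7 colours is missing from a 20-ball draw with probability C(60, 20)/C(70, 20), so the expected number of distinct colours is 7 times the complement. A quick independent check:

# Sanity check with explicit numbers (70 balls, 10 per colour, 20 drawn).
from math import comb

p_missing = comb(60, 20) / comb(70, 20)   # all 20 balls avoid one fixed colour
print(f"{7 * (1 - p_missing):.9f}")       # 6.818741802, matching solution(20)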
329
0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch snake_case__ : int = logging.get_logger(__name__) class A_ ( _lowerCamelCase ): lowerCAmelCase__ = ['pixel_values'] def __init__(self :Any , _UpperCamelCase :str = True , _UpperCamelCase :Optional[Any] = None , _UpperCamelCase :Any = PILImageResampling.BILINEAR , _UpperCamelCase :List[str] = True , _UpperCamelCase :str = 1 / 255 , _UpperCamelCase :List[Any] = True , _UpperCamelCase :str = None , _UpperCamelCase :Union[str, Any] = True , **_UpperCamelCase :Union[str, Any] , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) __A = size if size is not None else {'''shortest_edge''': 224} __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) __A = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256} __A = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __A = do_resize __A = size __A = resample __A = do_rescale __A = rescale_factor __A = do_center_crop __A = crop_size __A = do_flip_channel_order def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :str , _UpperCamelCase :List[str] , _UpperCamelCase :Optional[int] = PIL.Image.BILINEAR , _UpperCamelCase :List[Any] = None , **_UpperCamelCase :Optional[Any] , )-> np.ndarray: __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" ) __A = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE ) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Dict , _UpperCamelCase :List[Any] , _UpperCamelCase :int , _UpperCamelCase :Dict = None , **_UpperCamelCase :List[str] , )-> np.ndarray: __A = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Tuple , _UpperCamelCase :int , _UpperCamelCase :Optional[int] , _UpperCamelCase :List[str] = None , **_UpperCamelCase :int , )-> Any: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] , _UpperCamelCase :Union[str, Any] = None )-> np.ndarray: return flip_channel_order(_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :int , _UpperCamelCase :List[str] , _UpperCamelCase :Optional[int] = None , _UpperCamelCase :Tuple = None , _UpperCamelCase :List[Any] = None , _UpperCamelCase :List[str] = None , _UpperCamelCase :Any = None , _UpperCamelCase :Optional[Any] = None , _UpperCamelCase :int = None , _UpperCamelCase :Any = None , _UpperCamelCase :int = None , _UpperCamelCase :str = ChannelDimension.FIRST , **_UpperCamelCase :List[Any] , )-> PIL.Image.Image: __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_center_crop if do_center_crop is not None else self.do_center_crop __A = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) __A = size if size is not None else self.size __A = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) __A = crop_size if crop_size is not None else self.crop_size __A = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __A = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) # All transformations expect numpy arrays. 
__A = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: __A = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: __A = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __A = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: __A = [self.flip_channel_order(image=_SCREAMING_SNAKE_CASE ) for image in images] __A = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] __A = {'''pixel_values''': images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :Any = None )-> Optional[Any]: __A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_SCREAMING_SNAKE_CASE ): __A = target_sizes.numpy() __A = [] for idx in range(len(_SCREAMING_SNAKE_CASE ) ): __A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE ) __A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_SCREAMING_SNAKE_CASE ) else: __A = logits.argmax(dim=1 ) __A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
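A hedged usage sketch of the processor above. The class identifier is obfuscated in this sample; MobileViTImageProcessor is assumed from the upstream file this mirrors (the BGR flip_channel_order handling matches that processor):

# Hypothetical usage; the class name is assumed, not defined in this sample.
import numpy as np

processor = MobileViTImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor(image, return_tensors="pt")  # resize -> center crop -> rescale -> BGR flip
print(batch["pixel_values"].shape)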
117
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ :str = { '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ :Union[str, Any] = [ '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegatronBertForCausalLM''', '''MegatronBertForMaskedLM''', '''MegatronBertForMultipleChoice''', '''MegatronBertForNextSentencePrediction''', '''MegatronBertForPreTraining''', '''MegatronBertForQuestionAnswering''', '''MegatronBertForSequenceClassification''', '''MegatronBertForTokenClassification''', '''MegatronBertModel''', '''MegatronBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
329
0
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiProcessCPUTester(unittest.TestCase):
    # Method names restored to the test_* convention so unittest discovers them.
    def test_script(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
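debug_launcher runs a function in freshly spawned processes without going through `accelerate launch`, which is what makes the CPU test above self-contained. A minimal sketch (num_processes is a keyword of accelerate's debug_launcher):

# Minimal sketch: run a function on two local CPU processes for debugging.
from accelerate import Accelerator, debug_launcher

def training_function():
    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")

debug_launcher(training_function, num_processes=2)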
161
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = AutoConfig.from_pretrained(a__ ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=a__ ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(a__ ) _UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['encoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): 
_UpperCAmelCase = F'''layers_{str(a__ )}''' # Self-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] _UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _UpperCAmelCase = flax_model.params['decoder']['block'][str(a__ )]['layer'] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(a__ ) print('T5X Model was successfully converted!'
) if __name__ == "__main__": lowerCAmelCase__ :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase__ :List[str] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
329
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ : List[Any] = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : int = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowercase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
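The _LazyModule registration above defers the heavy torch/tf imports until a symbol is first accessed, via a module-level __getattr__ (PEP 562). A stripped-down sketch of the idea (illustrative, not the transformers implementation):

# Toy version of lazy submodule imports using PEP 562.
import importlib

_IMPORT_STRUCTURE = {"modeling_longformer": ["LongformerModel"]}

def __getattr__(name):  # called only when `name` is not found normally
    for submodule, exports in _IMPORT_STRUCTURE.items():
        if name in exports:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")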
187
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    # Restored identifiers: this sample is the transformers CTRL configuration
    # with all names collapsed by the corpus obfuscation; values are unchanged.
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
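The attribute_map lets generic PretrainedConfig names resolve to the CTRL-specific fields, e.g.:

# Generic names route through attribute_map to the model-specific fields.
config = CTRLConfig(n_embd=1280, n_layer=48)
print(config.hidden_size)        # 1280 (alias of n_embd)
print(config.num_hidden_layers)  # 48 (alias of n_layer)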
329
0
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
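A quick check of the generator and the 6k +/- 1 trial division (the problem statement's own example: the 6th prime is 13):

import itertools

print(list(itertools.islice(prime_generator(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(solution(6))  # 13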
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __a ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict: """simple docstring""" _UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_pad def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" if not batched: _UpperCAmelCase = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): _UpperCAmelCase , _UpperCAmelCase = image.size else: _UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2] if w < h: _UpperCAmelCase = int(self.size['shortest_edge'] * h / w ) _UpperCAmelCase = self.size['shortest_edge'] elif w > h: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _UpperCAmelCase = self.size['shortest_edge'] _UpperCAmelCase = self.size['shortest_edge'] else: _UpperCAmelCase = [] for image in image_inputs: _UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] _UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( UpperCAmelCase , unittest.TestCase ): _a : str = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = DeformableDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self ) -> str: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values _UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'image_id': 39769, 'annotations': target} # encode them _UpperCAmelCase = DeformableDetrImageProcessor() _UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _UpperCAmelCase = json.loads(f.read() ) _UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target} _UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' ) 
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values _UpperCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area _UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes _UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id _UpperCAmelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd _UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels _UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks _UpperCAmelCase = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size _UpperCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size _UpperCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
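For orientation, the slow tests above can be reproduced outside the test harness with the same fixtures; a hedged sketch:

# Encoding an image plus COCO detection annotations, mirroring the slow test.
import json

from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])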
329
0