Column schema:

    code                      string (lengths 82–54.1k)
    code_codestyle            int64  (0–699)
    style_context             string (lengths 111–35.6k)
    style_context_codestyle   int64  (0–699)
    label                     int64  (0–1)

Rows below are shown in field order: code, code_codestyle, style_context, style_context_codestyle, label; the integer fields are labeled inline.
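A minimal sketch of loading and inspecting rows with this schema via the Hugging Face `datasets` library; the repository id is a placeholder, since the preview does not name the dataset.

```python
from datasets import load_dataset

# Placeholder repo id -- the preview does not name the dataset.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:120])               # first code snippet (string)
print(row["code_codestyle"])           # style id of the snippet, 0-699
print(row["style_context"][:120])      # second snippet used as style context
print(row["style_context_codestyle"])  # style id of the context, 0-699
print(row["label"])                    # binary label, 0 or 1
```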
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = "blip_text_model" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=30_524 , SCREAMING_SNAKE_CASE__ : Optional[int]=768 , SCREAMING_SNAKE_CASE__ : Any=768 , SCREAMING_SNAKE_CASE__ : Any=3_072 , SCREAMING_SNAKE_CASE__ : Dict=768 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=512 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=1e-1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=30_522 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=102 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=True , **SCREAMING_SNAKE_CASE__ : int , ) -> Dict: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , sep_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = encoder_hidden_size lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = projection_dim lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = hidden_act lowerCAmelCase__ = initializer_range lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = is_decoder lowerCAmelCase__ = use_cache @classmethod def a ( cls : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Any ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": lowerCAmelCase__ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = "blip_vision_model" def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Dict=768 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_072 , SCREAMING_SNAKE_CASE__ : Optional[int]=512 , SCREAMING_SNAKE_CASE__ : Optional[int]=12 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : str=384 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=1e-5 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-1_0 , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = hidden_size lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = projection_dim lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = patch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = hidden_act @classmethod def a ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : str ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": lowerCAmelCase__ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = "blip" snake_case__ = True def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Any=512 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2.6_592 , SCREAMING_SNAKE_CASE__ : Tuple=256 , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE__ ) if text_config is None: lowerCAmelCase__ = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: lowerCAmelCase__ = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." 
) lowerCAmelCase__ = BlipTextConfig(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = BlipVisionConfig(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.vision_config.hidden_size lowerCAmelCase__ = projection_dim lowerCAmelCase__ = logit_scale_init_value lowerCAmelCase__ = 1.0 lowerCAmelCase__ = 0.02 lowerCAmelCase__ = image_text_hidden_size @classmethod def a ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : BlipTextConfig , SCREAMING_SNAKE_CASE__ : BlipVisionConfig , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE__ ) def a ( self : Tuple ) -> Optional[int]: lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.text_config.to_dict() lowerCAmelCase__ = self.vision_config.to_dict() lowerCAmelCase__ = self.__class__.model_type return output
code_codestyle: 61

style_context:
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __A : Dict = sample_size # time if time_embedding_type == "fourier": __A : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase) __A : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": __A : List[str] = Timesteps( block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase) __A : List[str] = block_out_channels[0] if use_timestep_embedding: __A : Optional[Any] = block_out_channels[0] * 4 __A : Optional[int] = TimestepEmbedding( in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , ) __A : Dict = nn.ModuleList([]) __A : Dict = None __A : Tuple = nn.ModuleList([]) __A : Tuple = None # down __A : Any = in_channels for i, down_block_type in enumerate(_UpperCAmelCase): __A : Tuple = output_channel __A : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __A : List[str] = i == len(_UpperCAmelCase) - 1 __A : int = get_down_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_UpperCAmelCase) # mid __A : str = get_mid_block( _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , ) # up __A : Optional[int] = list(reversed(_UpperCAmelCase)) __A : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: __A : str = out_channels else: __A : List[Any] = block_out_channels[0] for i, up_block_type in enumerate(_UpperCAmelCase): __A : Optional[Any] = output_channel __A : Optional[Any] = ( reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels ) __A : Dict = i == len(_UpperCAmelCase) - 1 __A : str = get_up_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_UpperCAmelCase) __A : Optional[int] = output_channel # out __A : str = 
norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) __A : Optional[Any] = get_out_block( out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): '''simple docstring''' __A : Any = timestep if not torch.is_tensor(_UpperCAmelCase): __A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0: __A : Any = timesteps[None].to(sample.device) __A : List[Any] = self.time_proj(_UpperCAmelCase) if self.config.use_timestep_embedding: __A : Dict = self.time_mlp(_UpperCAmelCase) else: __A : Dict = timestep_embed[..., None] __A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) __A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down __A : int = () for downsample_block in self.down_blocks: __A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase) down_block_res_samples += res_samples # 3. mid if self.mid_block: __A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase) # 4. up for i, upsample_block in enumerate(self.up_blocks): __A : Any = down_block_res_samples[-1:] __A : Optional[int] = down_block_res_samples[:-1] __A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase) # 5. post-process if self.out_block: __A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase) if not return_dict: return (sample,) return UNetaDOutput(sample=_UpperCAmelCase)
style_context_codestyle: 8
label: 0

code:
snake_case = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} snake_case = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(lowercase , lowercase , lowercase ) order.append(lowercase ) return order def lowerCamelCase__ ( lowercase , lowercase , lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : List[Any] = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(lowercase , lowercase , lowercase ) return component def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) * [False] SCREAMING_SNAKE_CASE : dict[int, list[int]] = {vert: [] for vert in range(len(lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(lowercase ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for i, was_visited in enumerate(lowercase ): if not was_visited: order += topology_sort(lowercase , lowercase , lowercase ) SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : List[str] = len(lowercase ) * [False] for i in range(len(lowercase ) ): SCREAMING_SNAKE_CASE : str = order[len(lowercase ) - i - 1] if not visited[vert]: SCREAMING_SNAKE_CASE : int = find_components(lowercase , lowercase , lowercase ) components_list.append(lowercase ) return components_list
code_codestyle: 62

style_context:
'''simple docstring''' def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> int: if len(__snake_case ) != len(__snake_case ): raise ValueError('String lengths must match!' ) __A : Optional[Any] = 0 for chara, chara in zip(__snake_case , __snake_case ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 8
label: 0

code:
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): # Initialise PyTorch model __UpperCAmelCase : List[str] = AlbertConfig.from_json_file(__lowerCamelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) __UpperCAmelCase : Any = AlbertForPreTraining(__lowerCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_albert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __lowerCamelCase ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) a : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
code_codestyle: 63

style_context:
'''simple docstring''' import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Tuple = logging.get_logger(__name__) def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]: __A : int = RobertaPreLayerNormConfig.from_pretrained( __snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict __A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) ) __A : str = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): __A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue __A : str = tensor_value __A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case ) model.save_pretrained(__snake_case ) # convert tokenizer __A : List[Any] = AutoTokenizer.from_pretrained(__snake_case ) tokenizer.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowercase__ : Optional[Any] = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
style_context_codestyle: 8
label: 0

code:
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ : int = logging.get_logger(__name__) def A__ ( snake_case_ : Any , snake_case_ : List[str] ): SCREAMING_SNAKE_CASE__: List[str]= set() SCREAMING_SNAKE_CASE__: int= [] def parse_line(snake_case_ : List[str] ): for line in fp: if isinstance(snake_case_ , snake_case_ ): SCREAMING_SNAKE_CASE__: Dict= line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case_ ) > 0: SCREAMING_SNAKE_CASE__: List[str]= '''\n'''.join(snake_case_ ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(snake_case_ ) buffer.clear() continue else: SCREAMING_SNAKE_CASE__: int= line.strip() buffer.append(snake_case_ ) if from_gh: for filename in os.listdir(snake_case_ ): SCREAMING_SNAKE_CASE__: Optional[Any]= os.path.join(snake_case_ , snake_case_ ) if not os.path.isdir(snake_case_ ): # read the file if filename != "warnings.txt": continue with open(snake_case_ ) as fp: parse_line(snake_case_ ) else: try: with zipfile.ZipFile(snake_case_ ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case_ ): # read the file if filename != "warnings.txt": continue with z.open(snake_case_ ) as fp: parse_line(snake_case_ ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def A__ ( snake_case_ : Dict , snake_case_ : Any ): SCREAMING_SNAKE_CASE__: List[str]= set() SCREAMING_SNAKE_CASE__: Tuple= [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case_ , snake_case_ ) ) return selected_warnings if __name__ == "__main__": def A__ ( snake_case_ : int ): return values.split(''',''' ) lowercase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) lowercase_ : Tuple = parser.parse_args() lowercase_ : List[Any] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 8_0) download_artifact(name, url, 
args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ : Optional[Any] = extract_warnings(args.output_dir, args.targets) lowercase_ : Any = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
code_codestyle: 64

style_context:
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings lowercase__ : Dict = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} ) lowerCAmelCase = field( default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) lowerCAmelCase = field( default=a__ , metadata={ '''help''': ( '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `max_length` value of the model configuration.''' ) } , ) lowerCAmelCase = field( default=a__ , metadata={ '''help''': ( '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `num_beams` value of the model configuration.''' ) } , ) lowerCAmelCase = field( default=a__ , metadata={ '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.''' } , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = super().to_dict() for k, v in d.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : List[Any] = v.to_dict() return d
style_context_codestyle: 8
label: 0

code:
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __UpperCAmelCase = pd.read_csv('sample_data.csv', header=None) __UpperCAmelCase = df.shape[:1][0] # If you're using some other dataset input the target column __UpperCAmelCase = df.iloc[:, 1:2] __UpperCAmelCase = actual_data.values.reshape(len_data, 1) __UpperCAmelCase = MinMaxScaler().fit_transform(actual_data) __UpperCAmelCase = 10 __UpperCAmelCase = 5 __UpperCAmelCase = 20 __UpperCAmelCase = len_data - periods * look_back __UpperCAmelCase = actual_data[:division] __UpperCAmelCase = actual_data[division - look_back :] __UpperCAmelCase, __UpperCAmelCase = [], [] __UpperCAmelCase, __UpperCAmelCase = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __UpperCAmelCase = np.array(train_x) __UpperCAmelCase = np.array(test_x) __UpperCAmelCase = np.array([list(i.ravel()) for i in train_y]) __UpperCAmelCase = np.array([list(i.ravel()) for i in test_y]) __UpperCAmelCase = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') __UpperCAmelCase = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) __UpperCAmelCase = model.predict(x_test)
code_codestyle: 65

style_context:
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''lxmert''' lowerCAmelCase = {} def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ): '''simple docstring''' __A : Tuple = vocab_size __A : int = hidden_size __A : str = num_attention_heads __A : Tuple = hidden_act __A : int = intermediate_size __A : str = hidden_dropout_prob __A : Optional[int] = attention_probs_dropout_prob __A : Optional[Any] = max_position_embeddings __A : Tuple = type_vocab_size __A : Optional[int] = initializer_range __A : Any = layer_norm_eps __A : Optional[Any] = num_qa_labels __A : Optional[int] = num_object_labels __A : Any = num_attr_labels __A : Union[str, Any] = l_layers __A : Optional[int] = x_layers __A : List[Any] = r_layers __A : Tuple = visual_feat_dim __A : Tuple = visual_pos_dim __A : Optional[int] = visual_loss_normalizer __A : int = task_matched __A : List[Any] = task_mask_lm __A : Optional[Any] = task_obj_predict __A : str = task_qa __A : List[Any] = visual_obj_loss __A : Optional[Any] = visual_attr_loss __A : Union[str, Any] = visual_feat_loss __A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers} super().__init__(**_UpperCAmelCase)
style_context_codestyle: 8
label: 0

code:
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") UpperCamelCase = f'''https://www.google.com/search?q={query}&num=100''' UpperCamelCase = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: UpperCamelCase = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: UpperCamelCase = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
code_codestyle: 66

style_context:
'''simple docstring''' import math import sys def _lowerCAmelCase ( __snake_case : int ) -> int: if number != int(__snake_case ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __A : str = [-1] * (number + 1) __A : Dict = 0 for i in range(1 , number + 1 ): __A : int = sys.maxsize __A : int = int(math.sqrt(__snake_case ) ) for j in range(1 , root + 1 ): __A : str = 1 + answers[i - (j**2)] __A : Dict = min(__snake_case , __snake_case ) __A : Union[str, Any] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 8
label: 0

code:
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, 
FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 67

style_context:
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : list[int] , __snake_case : int ) -> tuple[float, list[float]]: __A : int = list(range(len(__snake_case ) ) ) __A : Optional[Any] = [v / w for v, w in zip(__snake_case , __snake_case )] index.sort(key=lambda __snake_case : ratio[i] , reverse=__snake_case ) __A : float = 0 __A : list[float] = [0] * len(__snake_case ) for i in index: if weight[i] <= capacity: __A : Optional[int] = 1 max_value += value[i] capacity -= weight[i] else: __A : List[Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 8
label: 0

code:
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __A = re.compile(r"\s+") def lowercase__ ( A_: int ) -> Any: """simple docstring""" return {"hash": hashlib.mda(re.sub(A_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def lowercase__ ( A_: int ) -> Optional[int]: """simple docstring""" __UpperCAmelCase =[len(A_ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(A_ ), "line_max": max(A_ )} def lowercase__ ( A_: Any ) -> int: """simple docstring""" __UpperCAmelCase =np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def lowercase__ ( A_: List[Any] , A_: Tuple ) -> str: """simple docstring""" if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def lowercase__ ( A_: List[str] , A_: Dict=5 ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase =["""auto-generated""", """autogenerated""", """automatically generated"""] __UpperCAmelCase =example["""content"""].splitlines() for _, line in zip(range(A_ ) , A_ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowercase__ ( A_: str , A_: List[Any]=5 , A_: List[Any]=0.0_5 ) -> List[str]: """simple docstring""" __UpperCAmelCase =["""unit tests""", """test file""", """configuration file"""] __UpperCAmelCase =example["""content"""].splitlines() __UpperCAmelCase =0 __UpperCAmelCase =0 # first test for _, line in zip(range(A_ ) , A_ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test __UpperCAmelCase =example["""content"""].count("""\n""" ) __UpperCAmelCase =int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowercase__ ( A_: Any ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =["""def """, """class """, """for """, """while """] __UpperCAmelCase =example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowercase__ ( A_: Optional[int] , A_: List[Any]=4 ) -> Any: """simple docstring""" __UpperCAmelCase =example["""content"""].splitlines() __UpperCAmelCase =0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowercase__ ( A_: List[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase =tokenizer(example["""content"""] , truncation=A_ )["""input_ids"""] __UpperCAmelCase =len(example["""content"""] ) / len(A_ ) return {"ratio": ratio} def lowercase__ ( A_: int ) -> str: """simple docstring""" __UpperCAmelCase ={} results.update(get_hash(A_ ) ) results.update(line_stats(A_ ) ) results.update(alpha_stats(A_ ) ) results.update(char_token_ratio(A_ ) ) results.update(is_autogenerated(A_ ) ) results.update(is_config_or_test(A_ ) ) results.update(has_no_keywords(A_ ) ) results.update(has_few_assignments(A_ ) ) return results def lowercase__ ( A_: Dict , 
A_: Any , A_: List[str] ) -> str: """simple docstring""" if not check_uniques(A_ , A_ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowercase__ ( A_: List[Any] ) -> Tuple: """simple docstring""" with open(A_ , """rb""" ) as f_in: with gzip.open(str(A_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(A_ , A_ ) os.unlink(A_ ) # Settings __A = HfArgumentParser(PreprocessingArguments) __A = parser.parse_args() if args.num_workers is None: __A = multiprocessing.cpu_count() __A = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __A = time.time() __A = load_dataset(args.dataset_name, split="train") print(F"""Time to load dataset: {time.time()-t_start:.2f}""") # Run preprocessing __A = time.time() __A = ds.map(preprocess, num_proc=args.num_workers) print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""") # Deduplicate hashes __A = set(ds.unique("hash")) __A = len(uniques) / len(ds) print(F"""Fraction of duplicates: {1-frac:.2%}""") # Deduplicate data and apply heuristics __A = time.time() __A = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args}) print(F"""Time to filter dataset: {time.time()-t_start:.2f}""") print(F"""Size of filtered dataset: {len(ds_filter)}""") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __A = time.time() __A , __A = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""") print(F"""Size of deduplicate dataset: {len(ds_filter)}""") # Save data in batches of samples_per_file __A = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / "duplicate_clusters.json", "w") as f: json.dump(duplicate_clusters, f) __A = output_dir / "data" data_dir.mkdir(exist_ok=True) __A = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __A = str(data_dir / F"""file-{file_number+1:012}.json""") __A = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
code_codestyle: 68

style_context:
'''simple docstring''' from __future__ import annotations import math class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : int = size # approximate the overall size of segment tree with given value __A : Optional[Any] = [0 for i in range(0 , 4 * size)] # create array to store lazy update __A : Optional[Any] = [0 for i in range(0 , 4 * size)] __A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return idx * 2 def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return idx * 2 + 1 def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if left_element == right_element: __A : List[Any] = a[left_element - 1] else: __A : List[str] = (left_element + right_element) // 2 self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase) __A : Any = max( self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)]) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if self.flag[idx] is True: __A : Optional[Any] = self.lazy[idx] __A : Optional[Any] = False if left_element != right_element: __A : List[Any] = self.lazy[idx] __A : Dict = self.lazy[idx] __A : Tuple = True __A : Union[str, Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __A : Optional[int] = val if left_element != right_element: __A : Tuple = val __A : Any = val __A : Tuple = True __A : Union[str, Any] = True return True __A : str = (left_element + right_element) // 2 self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : int = max( self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)]) return True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if self.flag[idx] is True: __A : Union[str, Any] = self.lazy[idx] __A : List[str] = False if left_element != right_element: __A : Union[str, Any] = self.lazy[idx] __A : Optional[int] = self.lazy[idx] __A : str = True __A : Union[str, Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __A : Any = (left_element + right_element) // 2 __A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return max(_UpperCAmelCase , _UpperCAmelCase) def __str__( self): '''simple docstring''' return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)]) if __name__ == "__main__": lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] lowercase__ : str = 15 lowercase__ : List[Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) 
print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 1_11) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 2_35) print(segt)
style_context_codestyle: 8
label: 0

code:
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Tuple: __snake_case = 1 __snake_case = 2 while i * i <= n: __snake_case = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __UpperCAmelCase ( ) -> Tuple: __snake_case = 1 __snake_case = 1 while True: i += 1 t_num += i if count_divisors(_UpperCAmelCase ) > 5_00: break return t_num if __name__ == "__main__": print(solution())
code_codestyle: 69

style_context:
'''simple docstring''' def _lowerCAmelCase ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float: __A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def _lowerCAmelCase ( ) -> Union[str, Any]: print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 8
label: 0

code:
from collections import Counter from timeit import timeit def _SCREAMING_SNAKE_CASE ( lowercase : str = "" , ): '''simple docstring''' return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2 def _SCREAMING_SNAKE_CASE ( lowercase : str = "" ): '''simple docstring''' if len(lowercase ) == 0: return True lowerCamelCase_ = input_str.replace(' ' , '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string lowerCamelCase_ = {} for character in lower_case_input_str: lowerCamelCase_ = character_freq_dict.get(lowercase , 0 ) + 1 lowerCamelCase_ = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _SCREAMING_SNAKE_CASE ( lowercase : str = "" ): '''simple docstring''' print('\nFor string = ' , lowercase , ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(lowercase ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) print( '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(lowercase ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) if __name__ == "__main__": lowerCamelCase : Optional[Any] = input( "Enter string to determine if it can be rearranged as a palindrome or not: " ).strip() benchmark(check_str) lowerCamelCase : int = can_string_be_rearranged_as_palindrome_counter(check_str) print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
code_codestyle: 70

style_context:
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): '''simple docstring''' __A : Optional[int] = parent __A : str = 13 __A : List[Any] = 7 __A : List[str] = True __A : str = True __A : Optional[Any] = True __A : int = True __A : Dict = 99 __A : Dict = 384 __A : Any = 2 __A : int = 4 __A : Optional[Any] = 37 __A : Optional[int] = 'gelu' __A : Dict = 0.1 __A : Optional[int] = 0.1 __A : Any = 512 __A : int = 16 __A : List[str] = 2 __A : str = 0.02 __A : Any = 3 __A : str = 4 __A : Union[str, Any] = 128 __A : int = 2 __A : List[Any] = 9 __A : List[Any] = 1 __A : List[Any] = None def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __A : str = None if self.use_input_mask: __A : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) __A : Optional[Any] = None if self.use_token_type_ids: __A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __A : Optional[int] = None __A : List[str] = None __A : Dict = None if self.use_labels: __A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) __A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __A : str = ids_tensor([self.batch_size] , self.num_choices) __A : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : int = TFConvBertModel(config=_UpperCAmelCase) __A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __A : Tuple = [input_ids, input_mask] __A : Any = model(_UpperCAmelCase) __A : Dict = model(_UpperCAmelCase) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase) __A : str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : str = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[int] = self.num_labels __A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase) __A : Optional[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Tuple = self.num_choices __A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase) __A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : int = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __A : Optional[Any] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = self.num_labels __A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase) __A : str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : int = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase) __A : Any = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : Union[str, Any] = model(_UpperCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.prepare_config_and_inputs() ( ( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) , ) : Union[str, Any] = config_and_inputs __A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = ( ( TFConvBertModel, TFConvBertForMaskedLM, 
TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = TFConvBertModelTester(self) __A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __A : List[str] = True __A : List[str] = True if hasattr(_UpperCAmelCase , 'use_cache'): __A : List[Any] = True __A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length) __A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) for model_class in self.all_model_classes: __A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) __A : Optional[int] = model_class(_UpperCAmelCase) __A : Optional[Any] = len(model(_UpperCAmelCase)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase) __A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1') __A : Tuple = tf.keras.models.load_model(_UpperCAmelCase) __A : str = model(_UpperCAmelCase) if self.is_encoder_decoder: __A : Optional[int] = outputs['encoder_hidden_states'] __A : str = outputs['encoder_attentions'] else: __A : List[Any] = outputs['hidden_states'] __A : Optional[Any] = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) __A : str = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) self.assertListEqual( list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) 
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base') self.assertIsNotNone(_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Any = True __A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length) __A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length) __A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) __A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) def check_decoder_attentions_output(_UpperCAmelCase): __A : List[str] = len(_UpperCAmelCase) self.assertEqual(out_len % 2 , 0) __A : Any = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase): __A : str = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __A : Dict = True __A : Any = False __A : str = model_class(_UpperCAmelCase) __A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : List[str] = len(_UpperCAmelCase) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) if self.is_encoder_decoder: __A : Union[str, Any] = model_class(_UpperCAmelCase) __A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_decoder_attentions_output(_UpperCAmelCase) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __A : int = True __A : Tuple = model_class(_UpperCAmelCase) __A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) # Check attention is always last and order is fine __A : Any = True __A : str = True __A : Union[str, Any] = model_class(_UpperCAmelCase) __A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase)) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) @require_tf class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base') __A : str = tf.constant([[0, 1, 2, 3, 4, 5]]) __A : Optional[int] = model(_UpperCAmelCase)[0] __A : List[Any] = [1, 6, 768] self.assertEqual(output.shape , _UpperCAmelCase) __A : Tuple = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, 
-0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ]) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4)
8
0
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class _snake_case(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
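For readers unfamiliar with the mechanism under test: the checker scans for `# Copied from` markers and verifies that the decorated class matches the referenced source, applying any `A->B` renames given after `with`. A minimal, hypothetical illustration of the marker syntax (the class body is a placeholder, not real diffusers code):

# Hypothetical example of the marker exercised above: this class must stay
# identical to DDPMSchedulerOutput with every "DDPM" replaced by "Test".

# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
class TestSchedulerOutput:
    """Placeholder body standing in for the copied dataclass."""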
71
'''simple docstring'''
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
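A small smoke test of the sorter above; the import block is a made-up example, assuming the functions are defined as in the repaired script:

# Hypothetical input: a multi-line _import_structure entry with unsorted names.
example = (
    '    "models.bert": [\n'
    '        "BertModel",\n'
    '        "BertConfig",\n'
    '    ],'
)
# sort_objects_in_import reorders the quoted names; both are classes, so they
# sort alphabetically (BertConfig before BertModel).
print(sort_objects_in_import(example))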
8
0
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class __magic_name__(ABC):
    @staticmethod
    @abstractmethod
    def _A(snake_case_):
        raise NotImplementedError()

    @abstractmethod
    def _A(self):
        raise NotImplementedError()
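Concretely, subclasses of such a CLI base class register themselves on an ArgumentParser and implement a run method. A minimal hypothetical subclass is sketched below; the descriptive names `register_subcommand` and `run` are assumptions (the dump above mangles both abstract methods to `_A`):

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    # Same shape as the abstract base above, with descriptive names (assumed).
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Wire a "hello" subcommand that instantiates and runs this class.
        sub = parser.add_subparsers().add_parser("hello")
        sub.set_defaults(func=lambda args: HelloCommand().run())

    def run(self):
        print("hello from the CLI")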
72
'''simple docstring'''


def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
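The check above tries every candidate divisor up to number // 2, i.e. O(n) work per call. A sketch of the same predicate with the standard O(√n) divisor-pairing optimization, checked against the first four perfect numbers:

import math


def is_perfect(number: int) -> bool:
    # Pair each divisor d <= sqrt(number) with its cofactor number // d.
    if number < 2:
        return False
    total = 1  # 1 divides every number >= 2
    for d in range(2, math.isqrt(number) + 1):
        if number % d == 0:
            total += d
            if d != number // d:
                total += number // d
    return total == number


assert [n for n in range(2, 10000) if is_perfect(n)] == [6, 28, 496, 8128]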
8
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig a_ : Optional[Any] = logging.get_logger(__name__) a_ : Tuple = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class _snake_case ( A__ ): _lowercase : Optional[int] = '''dpt''' def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1E-12 , a=384 , a=16 , a=3 , a=False , a=True , a=[2, 5, 8, 11] , a="project" , a=[4, 2, 1, 0.5] , a=[96, 192, 384, 768] , a=256 , a=-1 , a=False , a=True , a=0.4 , a=255 , a=0.1 , a=[1, 1024, 24, 24] , a=[0, 1] , a=None , **a , ) -> List[str]: super().__init__(**a) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.') SCREAMING_SNAKE_CASE = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } SCREAMING_SNAKE_CASE = BitConfig(**a) elif isinstance(a , a): logger.info('Initializing the config with a `BiT` backbone.') SCREAMING_SNAKE_CASE = BitConfig(**a) elif isinstance(a , a): SCREAMING_SNAKE_CASE = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''') SCREAMING_SNAKE_CASE = backbone_featmap_shape SCREAMING_SNAKE_CASE = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.') else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']') SCREAMING_SNAKE_CASE = readout_type SCREAMING_SNAKE_CASE = reassemble_factors SCREAMING_SNAKE_CASE = neck_hidden_sizes SCREAMING_SNAKE_CASE = fusion_hidden_size SCREAMING_SNAKE_CASE = head_in_index SCREAMING_SNAKE_CASE = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) SCREAMING_SNAKE_CASE = use_auxiliary_head SCREAMING_SNAKE_CASE = auxiliary_loss_weight SCREAMING_SNAKE_CASE = semantic_loss_ignore_index SCREAMING_SNAKE_CASE = semantic_classifier_dropout def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
73
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : str = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __A : Optional[Any] = k.replace(__snake_case , __snake_case ) if k.startswith('encoder' ): __A : Any = k.replace('.attn' , '.self_attn' ) __A : Any = k.replace('norm1' , 'self_attn_layer_norm' ) __A : str = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): __A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' ) __A : str = k.replace('norm2' , 'encoder_attn_layer_norm' ) __A : int = k.replace('norm3' , 'final_layer_norm' ) return k def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict: __A : Optional[int] = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: __A : Tuple = sd.pop(__snake_case ) __A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd __A : str = v lowercase__ : Tuple = ['''START'''] @torch.no_grad() def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int: __A : List[str] = torch.load(__snake_case , map_location='cpu' ) __A : Tuple = model['model'] __A : str = BlenderbotConfig.from_json_file(__snake_case ) __A : int = BlenderbotForConditionalGeneration(__snake_case ) __A : List[Any] = m.model.state_dict().keys() __A : Optional[int] = [] __A : Optional[int] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __A : Union[str, Any] = rename_state_dict_key(__snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __A : Optional[Any] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__snake_case ) m.model.load_state_dict(__snake_case , strict=__snake_case ) m.half() m.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) lowercase__ : Optional[Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
8
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''mra''' def __init__( self : List[Any] , _A : Any=5_0265 , _A : Tuple=768 , _A : Any=12 , _A : Union[str, Any]=12 , _A : str=3072 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : List[str]=512 , _A : Tuple=1 , _A : Union[str, Any]=0.02 , _A : List[str]=1e-5 , _A : Optional[int]="absolute" , _A : List[str]=4 , _A : int="full" , _A : Optional[Any]=0 , _A : int=0 , _A : int=1 , _A : Union[str, Any]=0 , _A : int=2 , **_A : Optional[Any] , ): """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) __SCREAMING_SNAKE_CASE : Tuple = vocab_size __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : int = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : int = hidden_act __SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = initializer_range __SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type __SCREAMING_SNAKE_CASE : Optional[Any] = block_per_row __SCREAMING_SNAKE_CASE : Union[str, Any] = approx_mode __SCREAMING_SNAKE_CASE : str = initial_prior_first_n_blocks __SCREAMING_SNAKE_CASE : Tuple = initial_prior_diagonal_n_blocks
74
'''simple docstring'''
import mpmath  # for roots of unity
import numpy as np


class SCREAMING_SNAKE_CASE:
    def __init__(self, poly_a=None, poly_b=None):
        '''simple docstring'''
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        '''simple docstring'''
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        '''simple docstring'''
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
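A quick usage sketch of the multiplier above (the class keeps its dump name `SCREAMING_SNAKE_CASE` here): multiplying 1 + 2x + 3x^2 by 4 + 5x should give 4 + 13x + 22x^2 + 15x^3, returned lowest degree first as rounded complex coefficients.

fft = SCREAMING_SNAKE_CASE(poly_a=[1, 2, 3], poly_b=[4, 5])
print(fft.product)  # coefficients ≈ [4, 13, 22, 15] (as rounded complex numbers)
print(fft)          # pretty-printed A, B and A*B via __str__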
8
0
'''simple docstring'''


def solution(lowerCAmelCase__: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, lowerCAmelCase__):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
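The solver caches each starting value's chain length in `counters`, so every value is walked at most once. The same memoization written recursively, as a minimal standalone sketch:

from functools import lru_cache


@lru_cache(maxsize=None)
def chain_length(n: int) -> int:
    # Terms in the Collatz chain from n down to 1, counting both endpoints.
    if n == 1:
        return 1
    return 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)


assert chain_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1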
75
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ): '''simple docstring''' __A : Union[str, Any] = parent __A : Tuple = batch_size __A : List[str] = image_size __A : Dict = patch_size __A : Optional[Any] = num_channels __A : Tuple = is_training __A : Dict = use_labels __A : List[Any] = hidden_size __A : Tuple = num_hidden_layers __A : int = num_attention_heads __A : Optional[int] = intermediate_size __A : Tuple = hidden_act __A : Any = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : List[Any] = type_sequence_label_size __A : List[Any] = initializer_range __A : Optional[int] = num_labels __A : List[Any] = scope __A : Any = n_targets __A : Union[str, Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __A : int = num_patches + 1 + self.num_detection_tokens def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) __A : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __A : List[Any] = [] for i in range(self.batch_size): __A : Optional[int] = {} __A : Union[str, Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase) __A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase) labels.append(_UpperCAmelCase) __A : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase): '''simple docstring''' __A : Any = YolosModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = YolosForObjectDetection(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : str = model(pixel_values=_UpperCAmelCase) __A : List[str] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) __A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.prepare_config_and_inputs() __A ,__A ,__A : Tuple = config_and_inputs __A : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' __A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __A : Any = [] for i in range(self.model_tester.batch_size): __A : Tuple = {} __A : Tuple = torch.ones( size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long) __A : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float) labels.append(_UpperCAmelCase) __A : str = labels return inputs_dict def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = YolosModelTester(self) __A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Tuple = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[Any] = model_class(_UpperCAmelCase) __A : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : int = 
[*signature.parameters.keys()] __A : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Optional[int] = True # in YOLOS, the seq_len is different __A : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __A : Dict = True __A : Dict = False __A : Union[str, Any] = True __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __A : List[Any] = True __A : List[str] = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __A : str = len(_UpperCAmelCase) # Check attention is always last and order is fine __A : Dict = True __A : Dict = True __A : Dict = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.hidden_states __A : List[str] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) # YOLOS has a different seq_length __A : Dict = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[str] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase) @slow def 
SCREAMING_SNAKE_CASE ( self): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase) self.assertIsNotNone(_UpperCAmelCase) def _lowerCAmelCase ( ) -> int: __A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase) __A : Any = self.default_image_processor __A : str = prepare_img() __A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase) # forward pass with torch.no_grad(): __A : str = model(inputs.pixel_values) # verify outputs __A : Tuple = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape , _UpperCAmelCase) __A : Dict = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , ) __A : int = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) # verify postprocessing __A : List[str] = image_processor.post_process_object_detection( _UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0] __A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase) __A : Union[str, Any] = [75, 75, 17, 63, 17] __A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase) self.assertEqual(len(results['scores']) , 5) self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4)) self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase) self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
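For reference, the same inference path the integration test exercises, as a standalone sketch; the model id, threshold, and post-processing call are taken from the test above, while the image path is a placeholder:

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("cats.png")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Turn raw logits/boxes into thresholded detections in image coordinates.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")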
8
0
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a_ = '3' print('Python version:', sys.version) print('OS platform:', platform.platform()) print('OS architecture:', platform.machine()) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) except ImportError: print('Torch version:', None) try: import transformers print('transformers version:', transformers.__version__) except ImportError: print('transformers version:', None)
76
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowercase__ : Optional[int] = None lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : List[str] = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } lowercase__ : Dict = { '''camembert-base''': 5_12, } lowercase__ : str = '''▁''' class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] lowerCAmelCase = CamembertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ): '''simple docstring''' __A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) __A : List[str] = vocab_file __A : Optional[int] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A : Optional[Any] = [self.cls_token_id] __A : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' __A : Optional[int] = [self.sep_token_id] __A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(_UpperCAmelCase): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __A : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase): copyfile(self.vocab_file , _UpperCAmelCase) return (out_vocab_file,)
8
0
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors A = logging.getLogger(__name__) class a__ ( __magic_name__ ): lowercase_ = "sequence-classification" def __init__( self : Optional[Any] , UpperCamelCase_ : int): """simple docstring""" if type(UpperCamelCase_) == dict: __UpperCAmelCase : Optional[int] = Namespace(**UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = glue_output_modes[hparams.task] __UpperCAmelCase : Optional[Any] = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase_ , UpperCamelCase_ , self.mode) def a_ ( self : Optional[Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" return self.model(**UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[int] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase : str = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __UpperCAmelCase : Optional[Any] = self(**UpperCamelCase_) __UpperCAmelCase : int = outputs[0] __UpperCAmelCase : str = self.trainer.lr_schedulers[0]["scheduler"] __UpperCAmelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Any = self.hparams __UpperCAmelCase : List[Any] = processors[args.task]() __UpperCAmelCase : Dict = processor.get_labels() for mode in ["train", "dev"]: __UpperCAmelCase : str = self._feature_file(UpperCamelCase_) if os.path.exists(UpperCamelCase_) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase_) else: logger.info("Creating features from dataset file at %s" , args.data_dir) __UpperCAmelCase : Optional[int] = ( processor.get_dev_examples(args.data_dir) if mode == "dev" else processor.get_train_examples(args.data_dir) ) __UpperCAmelCase : List[Any] = convert_examples_to_features( UpperCamelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase_) torch.save(UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : bool = False): """simple docstring""" __UpperCAmelCase : Dict = "dev" if mode == "test" else mode __UpperCAmelCase : Dict = self._feature_file(UpperCamelCase_) logger.info("Loading features from cached file %s" , UpperCamelCase_) __UpperCAmelCase : Dict = torch.load(UpperCamelCase_) __UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long) __UpperCAmelCase : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) __UpperCAmelCase : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase : int = 
torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) , batch_size=UpperCamelCase_ , shuffle=UpperCamelCase_ , ) def a_ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __UpperCAmelCase : List[Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __UpperCAmelCase : Any = self(**UpperCamelCase_) __UpperCAmelCase , __UpperCAmelCase : Tuple = outputs[:2] __UpperCAmelCase : List[Any] = logits.detach().cpu().numpy() __UpperCAmelCase : List[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def a_ ( self : List[Any] , UpperCamelCase_ : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item() __UpperCAmelCase : int = np.concatenate([x["pred"] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": __UpperCAmelCase : str = np.argmax(UpperCamelCase_ , axis=1) elif self.hparams.glue_output_mode == "regression": __UpperCAmelCase : Union[str, Any] = np.squeeze(UpperCamelCase_) __UpperCAmelCase : List[str] = np.concatenate([x["target"] for x in outputs] , axis=0) __UpperCAmelCase : Optional[Any] = [[] for _ in range(out_label_ids.shape[0])] __UpperCAmelCase : Any = [[] for _ in range(out_label_ids.shape[0])] __UpperCAmelCase : Union[str, Any] = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase_ , UpperCamelCase_)} __UpperCAmelCase : Optional[int] = dict(results.items()) __UpperCAmelCase : str = results return ret, preds_list, out_label_list def a_ ( self : Union[str, Any] , UpperCamelCase_ : list): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = self._eval_end(UpperCamelCase_) __UpperCAmelCase : List[Any] = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self._eval_end(UpperCamelCase_) __UpperCAmelCase : Any = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def a_ ( UpperCamelCase_ : Any , UpperCamelCase_ : List[Any]): """simple docstring""" BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets") return parser def _UpperCamelCase ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : str = argparse.ArgumentParser() add_generic_args(UpperCamelCase , os.getcwd() ) __UpperCAmelCase : int = GLUETransformer.add_model_specific_args(UpperCamelCase , os.getcwd() ) __UpperCAmelCase : List[str] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __UpperCAmelCase : Union[str, Any] = os.path.join( "./results" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) __UpperCAmelCase : Tuple = GLUETransformer(UpperCamelCase ) __UpperCAmelCase : int = generic_train(UpperCamelCase , UpperCamelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __UpperCAmelCase : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=UpperCamelCase ) ) __UpperCAmelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(UpperCamelCase ) if __name__ == "__main__": main()
77
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase__ : Any = '''hf-internal-testing/tiny-random-bert''' lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase))) with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Any = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) self.assertTrue(os.path.isfile(_UpperCAmelCase)) # File is cached at the same place the second time. __A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase) # Using a specific revision to test the full commit hash. __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223') self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): __A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase) with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): __A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa') with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : int = cached_file(_UpperCAmelCase , 'conf') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : Any = cached_file(_UpperCAmelCase , 'conf') with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf'))) __A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : List[str] = mock.Mock() __A : Dict = 500 __A : List[str] = {} __A : List[Any] = HTTPError __A : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head: __A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt')) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): get_file_from_repo('bert-base-case' , _UpperCAmelCase) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha') __A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase) # The name is the cached name which is not very easy to test, so instead we load the content. __A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read()) self.assertEqual(config['hidden_size'] , 768) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __A : Tuple = Path(_UpperCAmelCase) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase)) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
8
0
'''simple docstring'''


def solution(snake_case_: int = 1000) -> int:
    '''simple docstring'''
    return sum(e for e in range(3, snake_case_) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
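The generator above scans every integer below the limit, which is O(n). The same sum has a closed form by inclusion-exclusion over arithmetic series (multiples of 15 are counted under both 3 and 5); a sketch, checked against known values:

def solution_closed_form(limit: int = 1000) -> int:
    # Sum of multiples of k below limit: k * m * (m + 1) // 2, with m = (limit - 1) // k.
    def series(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2

    return series(3) + series(5) - series(15)


assert solution_closed_form(10) == 23  # 3 + 5 + 6 + 9
assert solution_closed_form(1000) == 233168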
78
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Create a randomly initialized (untrained) model from a pretrained config and save it."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
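A hypothetical direct call of the function above; the base config name, output directory, and the `num_layers` override are illustrative only (keyword arguments are forwarded to `AutoConfig.from_pretrained` as config overrides):

# Illustrative only: build and save an untrained, shrunken T5.
model = save_randomly_initialized_version(
    "t5-small",          # hypothetical base config
    "./tiny-random-t5",  # hypothetical output directory
    num_layers=2,        # forwarded to AutoConfig.from_pretrained
)
print(type(model).__name__)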
8
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE__ : str = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { """google/bigbird-roberta-base""": 40_96, """google/bigbird-roberta-large""": 40_96, """google/bigbird-base-trivia-itc""": 40_96, } SCREAMING_SNAKE_CASE__ : str = """▁""" class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = BigBirdTokenizer __lowerCamelCase = ['input_ids', 'attention_mask'] __lowerCamelCase = [] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase="[CLS]" , **_lowerCAmelCase , ): UpperCAmelCase__ : int = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token UpperCAmelCase__ : Any = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token UpperCAmelCase__ : str = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token UpperCAmelCase__ : List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token UpperCAmelCase__ : Dict = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token UpperCAmelCase__ : Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase__ : List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , ) UpperCAmelCase__ : List[str] = vocab_file UpperCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): UpperCAmelCase__ : List[str] = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1] def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): UpperCAmelCase__ : Any = [self.sep_token_id] UpperCAmelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_lowerCAmelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase__ : Optional[int] = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ): copyfile(self.vocab_file , _lowerCAmelCase ) return (out_vocab_file,)
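# Minimal usage sketch for the fast BigBird tokenizer above (downloads from the Hub):
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Hello world")
# input_ids come back wrapped with special tokens, matching build_inputs_with_special_tokens
print(tokenizer.decode(encoded.input_ids))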
code_codestyle: 79
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase__ : Any = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''tapas''' def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __A : Dict = vocab_size __A : Tuple = hidden_size __A : Any = num_hidden_layers __A : int = num_attention_heads __A : Tuple = hidden_act __A : Tuple = intermediate_size __A : List[Any] = hidden_dropout_prob __A : int = attention_probs_dropout_prob __A : List[str] = max_position_embeddings __A : Optional[int] = type_vocab_sizes __A : str = initializer_range __A : List[str] = layer_norm_eps # Fine-tuning task hyperparameters __A : List[str] = positive_label_weight __A : List[Any] = num_aggregation_labels __A : Optional[Any] = aggregation_loss_weight __A : Tuple = use_answer_as_supervision __A : List[str] = answer_loss_importance __A : Any = use_normalized_answer_loss __A : Any = huber_loss_delta __A : Union[str, Any] = temperature __A : Tuple = aggregation_temperature __A : Optional[Any] = use_gumbel_for_cells __A : List[str] = use_gumbel_for_aggregation __A : Tuple = average_approximation_function __A : List[str] = cell_selection_preference __A : Dict = answer_loss_cutoff __A : Union[str, Any] = max_num_rows __A : Optional[Any] = max_num_columns __A : int = average_logits_per_cell __A : Optional[Any] = select_one_column __A : int = allow_empty_column_selection __A : List[Any] = init_cell_selection_weights_to_zero __A : int = reset_position_index_per_cell __A : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters __A : Optional[Any] = aggregation_labels __A : List[str] = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCAmelCase): __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
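# Sketch: instantiating the TAPAS configuration above, overriding two of the
# fine-tuning hyperparameters from its signature:
from transformers import TapasConfig

config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)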
style_context_codestyle: 8
label: 0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __UpperCamelCase : int = { """vocab_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt""" ), """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""", """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli""": ( """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json""" ), }, } __UpperCamelCase : Tuple = { """squeezebert/squeezebert-uncased""": 512, """squeezebert/squeezebert-mnli""": 512, """squeezebert/squeezebert-mnli-headless""": 512, } __UpperCamelCase : Optional[Any] = { """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True}, } class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Tuple = VOCAB_FILES_NAMES __snake_case :Any = PRETRAINED_VOCAB_FILES_MAP __snake_case :Optional[int] = PRETRAINED_INIT_CONFIGURATION __snake_case :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case :Union[str, Any] = SqueezeBertTokenizer def __init__( self : Optional[Any] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple="[UNK]" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : str="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : Optional[int]="[MASK]" , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=None , **_lowerCAmelCase : Optional[Any] , ) -> str: """simple docstring""" super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) __lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , _lowerCAmelCase ) != tokenize_chinese_chars ): __lowercase = getattr(_lowerCAmelCase , normalizer_state.pop("""type""" ) ) __lowercase = do_lower_case __lowercase = strip_accents __lowercase = tokenize_chinese_chars __lowercase = normalizer_class(**_lowerCAmelCase ) __lowercase = do_lower_case def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=None ) -> Tuple: """simple docstring""" __lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] 
if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : List[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __lowercase = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
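# Usage sketch for the fast SqueezeBERT tokenizer above: a sentence pair is encoded
# as [CLS] A [SEP] B [SEP] with 0/1 token_type_ids, per the methods defined above.
from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("first sequence", "second sequence")
print(encoded.input_ids, encoded.token_type_ids)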
code_codestyle: 80
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize): '''simple docstring''' __A : Union[str, Any] = 'bilinear' __A : int = max_size __A : Optional[Any] = short_edge_length def __call__( self , _UpperCAmelCase): '''simple docstring''' __A : int = [] for img in imgs: __A ,__A : Dict = img.shape[:2] # later: provide list and randomly choose index for resize __A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img __A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase) if h < w: __A ,__A : Optional[Any] = size, scale * w else: __A ,__A : Optional[Any] = scale * h, size if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size: __A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase) __A : Tuple = newh * scale __A : Dict = neww * scale __A : Dict = int(neww + 0.5) __A : Optional[int] = int(newh + 0.5) if img.dtype == np.uinta: __A : int = Image.fromarray(_UpperCAmelCase) __A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) __A : Dict = np.asarray(_UpperCAmelCase) else: __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw __A : Dict = nn.functional.interpolate( _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0) img_augs.append(_UpperCAmelCase) return img_augs class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) __A : List[Any] = cfg.INPUT.FORMAT __A : Dict = cfg.SIZE_DIVISIBILITY __A : str = cfg.PAD_VALUE __A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST __A : int = cfg.MODEL.DEVICE __A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images])) __A : Dict = [im.shape[-2:] for im in images] __A : Optional[int] = [ nn.functional.pad( _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_UpperCAmelCase , _UpperCAmelCase) ] return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase) def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' with torch.no_grad(): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : int = [images] if single_image: assert len(_UpperCAmelCase) == 1 for i in range(len(_UpperCAmelCase)): if isinstance(images[i] , torch.Tensor): images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge __A : str = torch.tensor([im.shape[:2] for im in images]) __A : List[str] = self.aug(_UpperCAmelCase) # transpose images and convert to 
torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __A : Any = [self.normalizer(_UpperCAmelCase) for x in images] # now pad them to do the following operations __A ,__A : Any = self.pad(_UpperCAmelCase) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int: assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!" __A ,__A : int = box_size tensor[:, 0].clamp_(min=0 , max=__snake_case ) tensor[:, 1].clamp_(min=0 , max=__snake_case ) tensor[:, 2].clamp_(min=0 , max=__snake_case ) tensor[:, 3].clamp_(min=0 , max=__snake_case )
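# Worked sketch of the box-rescaling helper above: boxes are (x0, y0, x1, y1) in
# pixels and scale_yx holds per-image (scale_y, scale_x) factors.
import torch

boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
scale_yx = torch.tensor([[0.5, 0.25]])
boxes[:, 0::2] *= scale_yx[:, 1]  # x coordinates * 0.25 -> 2.5, 27.5
boxes[:, 1::2] *= scale_yx[:, 0]  # y coordinates * 0.5  -> 10.0, 110.0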
style_context_codestyle: 8
label: 0
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
code_codestyle: 81
def compute_ap(graph):
    """Print all articulation points (cut vertices) of an undirected graph."""
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            # a DFS-tree root is an articulation point only if it has more
            # than one outgoing tree edge
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
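# Expected output for the sample graph above: 2, 3 and 5. Removing vertex 2
# separates {0, 1} from the rest, removing 3 isolates 4, and removing 5
# disconnects the 6-7-8 cycle.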
style_context_codestyle: 8
label: 0
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" ) UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="np" ).input_ids UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="np" ).input_ids UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id ) UpperCAmelCase_ = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits UpperCAmelCase_ = optax.softmax_cross_entropy(_UpperCAmelCase , onehot(_UpperCAmelCase , logits.shape[-1] ) ).mean() UpperCAmelCase_ = -(labels.shape[-1] * loss.item()) UpperCAmelCase_ = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
code_codestyle: 82
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } lowercase__ : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]: for attribute in key.split('.' ): __A : int = getattr(__snake_case , __snake_case ) if weight_type is not None: __A : Optional[int] = getattr(__snake_case , __snake_case ).shape else: __A : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __A : Tuple = value elif weight_type == "weight_g": __A : Union[str, Any] = value elif weight_type == "weight_v": __A : Optional[Any] = value elif weight_type == "bias": __A : Optional[int] = value else: __A : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]: __A : Optional[Any] = [] __A : Any = fairseq_model.state_dict() __A : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __A : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , ) __A : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __A : int = True if "*" in mapped_key: __A : Any = name.split(__snake_case )[0].split('.' )[-2] __A : List[Any] = mapped_key.replace('*' , __snake_case ) if "weight_g" in name: __A : Optional[Any] = 'weight_g' elif "weight_v" in name: __A : Union[str, Any] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __A : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __A : Tuple = 'weight' else: __A : Dict = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int: __A : int = full_name.split('conv_layers.' )[-1] __A : List[str] = name.split('.' ) __A : Optional[int] = int(items[0] ) __A : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __A : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __A : Union[str, Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __A : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __A : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) @torch.no_grad() def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any: # load the pre-trained checkpoints __A : List[str] = torch.load(__snake_case ) __A : Dict = WavLMConfigOrig(checkpoint['cfg'] ) __A : Optional[int] = WavLMOrig(__snake_case ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __A : List[Any] = WavLMConfig.from_pretrained(__snake_case ) else: __A : Dict = WavLMConfig() __A : Optional[Any] = WavLMModel(__snake_case ) recursively_load_weights(__snake_case , __snake_case ) hf_wavlm.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowercase__ : Any = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
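# Invocation sketch for the WavLM converter above (script and checkpoint file names
# are assumptions; the flags are the ones defined in the argparse block above):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base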
style_context_codestyle: 8
label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
code_codestyle: 83
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __A : Dict = sample_size # time if time_embedding_type == "fourier": __A : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase) __A : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": __A : List[str] = Timesteps( block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase) __A : List[str] = block_out_channels[0] if use_timestep_embedding: __A : Optional[Any] = block_out_channels[0] * 4 __A : Optional[int] = TimestepEmbedding( in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , ) __A : Dict = nn.ModuleList([]) __A : Dict = None __A : Tuple = nn.ModuleList([]) __A : Tuple = None # down __A : Any = in_channels for i, down_block_type in enumerate(_UpperCAmelCase): __A : Tuple = output_channel __A : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __A : List[str] = i == len(_UpperCAmelCase) - 1 __A : int = get_down_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_UpperCAmelCase) # mid __A : str = get_mid_block( _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , ) # up __A : Optional[int] = list(reversed(_UpperCAmelCase)) __A : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: __A : str = out_channels else: __A : List[Any] = block_out_channels[0] for i, up_block_type in enumerate(_UpperCAmelCase): __A : Optional[Any] = output_channel __A : Optional[Any] = ( reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels ) __A : Dict = i == len(_UpperCAmelCase) - 1 __A : str = get_up_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_UpperCAmelCase) __A : Optional[int] = output_channel # out __A : str = 
norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) __A : Optional[Any] = get_out_block( out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): '''simple docstring''' __A : Any = timestep if not torch.is_tensor(_UpperCAmelCase): __A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0: __A : Any = timesteps[None].to(sample.device) __A : List[Any] = self.time_proj(_UpperCAmelCase) if self.config.use_timestep_embedding: __A : Dict = self.time_mlp(_UpperCAmelCase) else: __A : Dict = timestep_embed[..., None] __A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) __A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down __A : int = () for downsample_block in self.down_blocks: __A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase) down_block_res_samples += res_samples # 3. mid if self.mid_block: __A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase) # 4. up for i, upsample_block in enumerate(self.up_blocks): __A : Any = down_block_res_samples[-1:] __A : Optional[int] = down_block_res_samples[:-1] __A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase) # 5. post-process if self.out_block: __A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase) if not return_dict: return (sample,) return UNetaDOutput(sample=_UpperCAmelCase)
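# Minimal forward-pass sketch for the 1D UNet above; the shapes follow the default
# config shown in the signature and should be treated as assumptions:
import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
noisy_sample = torch.randn(1, 2, model.config.sample_size)
output = model(noisy_sample, timestep=10).sample  # same (batch, channels, length) shape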
style_context_codestyle: 8
label: 0
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : List[Any] = """conditional_detr""" _UpperCamelCase : Any = ["""past_key_values"""] _UpperCamelCase : Optional[Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , snake_case=True , snake_case=None , snake_case=3 , snake_case=300 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case="relu" , snake_case=256 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1.0 , snake_case=False , snake_case="sine" , snake_case="resnet50" , snake_case=True , snake_case=False , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=1 , snake_case=1 , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=0.25 , **snake_case , ): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) lowercase = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(snake_case , snake_case ): lowercase = backbone_config.get('model_type' ) lowercase = CONFIG_MAPPING[backbone_model_type] lowercase = config_class.from_dict(snake_case ) lowercase = use_timm_backbone lowercase = backbone_config lowercase = num_channels lowercase = num_queries lowercase = d_model lowercase = encoder_ffn_dim lowercase = encoder_layers lowercase = encoder_attention_heads lowercase = decoder_ffn_dim lowercase = decoder_layers lowercase = decoder_attention_heads lowercase = dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = activation_function lowercase = init_std lowercase = init_xavier_std lowercase = encoder_layerdrop lowercase = decoder_layerdrop lowercase = encoder_layers lowercase = auxiliary_loss lowercase = position_embedding_type lowercase = backbone lowercase = use_pretrained_backbone lowercase = dilation # Hungarian matcher lowercase = class_cost lowercase = bbox_cost lowercase = giou_cost # Loss coefficients lowercase = mask_loss_coefficient lowercase = dice_loss_coefficient lowercase = cls_loss_coefficient lowercase = bbox_loss_coefficient lowercase = giou_loss_coefficient lowercase = focal_alpha super().__init__(is_encoder_decoder=snake_case , **snake_case ) @property def SCREAMING_SNAKE_CASE__ ( self ): return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self ): return self.d_model def SCREAMING_SNAKE_CASE__ ( self ): lowercase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowercase = self.backbone_config.to_dict() lowercase = self.__class__.model_type return output class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : List[str] = version.parse("""1.11""" ) @property def SCREAMING_SNAKE_CASE__ ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 
'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): return 1E-5 @property def SCREAMING_SNAKE_CASE__ ( self ): return 12
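# Sketch: the ONNX export spec defined above can be inspected directly; the import
# path for the Onnx config class is an assumption based on this module's layout.
from transformers import ConditionalDetrConfig
from transformers.models.conditional_detr.configuration_conditional_detr import ConditionalDetrOnnxConfig

onnx_config = ConditionalDetrOnnxConfig(ConditionalDetrConfig())
print(onnx_config.inputs)               # pixel_values / pixel_mask with a dynamic batch axis
print(onnx_config.atol_for_validation)  # 1e-05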
code_codestyle: 84
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char_1, char_2 in zip(string1, string2):
        if char_1 != char_2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
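# Example: "karolin" and "kathrin" differ at three positions.
assert hamming_distance("karolin", "kathrin") == 3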
style_context_codestyle: 8
label: 0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 85
"""Convert a RoBERTa-PreLayerNorm checkpoint into a Hugging Face RobertaPreLayerNorm model."""

import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    # instantiate from config plus the converted state_dict; no pretrained weights are fetched here
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
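# Invocation sketch mirroring the argparse definitions above (the script file name
# is an assumption; the flags are the ones defined above):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm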
style_context_codestyle: 8
label: 0
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" if "img_encoder.pos_embed" in name: A_ = name.replace("img_encoder.pos_embed" ,"vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: A_ = name.replace("img_encoder.patch_embed.proj" ,"vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: A_ = name.replace("img_encoder.patch_embed.norm" ,"vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: A_ = name.replace("img_encoder.layers" ,"vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: A_ = name.replace("blocks" ,"layers" ) if "attn" in name and "pre_assign" not in name: A_ = name.replace("attn" ,"self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: A_ = name.replace("proj" ,"out_proj" ) if "pre_assign_attn.attn.proj" in name: A_ = name.replace("pre_assign_attn.attn.proj" ,"pre_assign_attn.attn.out_proj" ) if "norm1" in name: A_ = name.replace("norm1" ,"layer_norm1" ) if "norm2" in name and "pre_assign" not in name: A_ = name.replace("norm2" ,"layer_norm2" ) if "img_encoder.norm" in name: A_ = name.replace("img_encoder.norm" ,"vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: A_ = name.replace("text_encoder.token_embedding" ,"text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: A_ = name.replace("text_encoder.positional_embedding" ,"text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: A_ = name.replace("text_encoder.transformer.resblocks." ,"text_model.encoder.layers." ) if "ln_1" in name: A_ = name.replace("ln_1" ,"layer_norm1" ) if "ln_2" in name: A_ = name.replace("ln_2" ,"layer_norm2" ) if "c_fc" in name: A_ = name.replace("c_fc" ,"fc1" ) if "c_proj" in name: A_ = name.replace("c_proj" ,"fc2" ) if "text_encoder" in name: A_ = name.replace("text_encoder" ,"text_model" ) if "ln_final" in name: A_ = name.replace("ln_final" ,"final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: A_ = name.replace("img_projector.linear_hidden." ,"visual_projection." ) if "img_projector.linear_out." in name: A_ = name.replace("img_projector.linear_out." ,"visual_projection.3." ) if "text_projector.linear_hidden" in name: A_ = name.replace("text_projector.linear_hidden" ,"text_projection" ) if "text_projector.linear_out" in name: A_ = name.replace("text_projector.linear_out" ,"text_projection.3" ) return name def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): A_ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." ) A_ , A_ = int(key_split[2] ), int(key_split[4] ) A_ = config.vision_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[dim : dim * 2, :] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." 
) A_ = int(key_split[3] ) A_ = config.text_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[ dim : dim * 2, : ] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] else: A_ = rename_key(__UpperCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): A_ = val.squeeze_() else: A_ = val return orig_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int]="groupvit-gcc-yfcc" ,__UpperCamelCase : List[Any]=False ): """simple docstring""" A_ = GroupViTConfig() A_ = GroupViTModel(__UpperCamelCase ).eval() A_ = torch.load(__UpperCamelCase ,map_location="cpu" )["model"] A_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0) # verify result A_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) A_ = prepare_img() A_ = processor(text=["a photo of a cat", "a photo of a dog"] ,images=__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors="pt" ) with torch.no_grad(): A_ = model(**__UpperCamelCase ) if model_name == "groupvit-gcc-yfcc": A_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": A_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image ,__UpperCamelCase ,atol=1E-3 ) processor.save_pretrained(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print("Successfully saved processor and model to" ,__UpperCamelCase ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(__UpperCamelCase ,organization="nielsr" ) model.push_to_hub(__UpperCamelCase ,organization="nielsr" ) if __name__ == "__main__": __a :Tuple = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) __a :int = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
code_codestyle: 86
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
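# Minimal sketch: turn on generation-based evaluation with the fields defined above.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="./out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)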
style_context_codestyle: 8
label: 0
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
code_codestyle: 87
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
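# Sketch: the three encoder depths above (language / cross-modality / vision) are
# independent knobs, collected into the num_hidden_layers mapping.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}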
style_context_codestyle: 8
label: 0
"""simple docstring""" import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowercase__ ( unittest.TestCase ): __UpperCAmelCase = inspect.getfile(accelerate.test_utils ) __UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) __UpperCAmelCase = ['''accelerate''', '''launch'''] __UpperCAmelCase = Path.home() / '''.cache/huggingface/accelerate''' __UpperCAmelCase = '''default_config.yaml''' __UpperCAmelCase = config_folder / config_file __UpperCAmelCase = config_folder / '''_default_config.yaml''' __UpperCAmelCase = Path('''tests/test_configs''' ) @classmethod def UpperCamelCase_ ( cls) -> List[str]: if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def UpperCamelCase_ ( cls) -> Tuple: if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : Dict = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def UpperCamelCase_ ( self) -> int: for config in sorted(self.test_config_path.glob("""**/*.yaml""")): with self.subTest(config_file=SCREAMING_SNAKE_CASE): execute_subprocess_async( self.base_cmd + ["""--config_file""", str(SCREAMING_SNAKE_CASE), self.test_file_path] , env=os.environ.copy()) def UpperCamelCase_ ( self) -> Any: execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy()) class lowercase__ ( unittest.TestCase ): __UpperCAmelCase = '''test-tpu''' __UpperCAmelCase = '''us-central1-a''' __UpperCAmelCase = '''ls''' __UpperCAmelCase = ['''accelerate''', '''tpu-config'''] __UpperCAmelCase = '''cd /usr/share''' __UpperCAmelCase = '''tests/test_samples/test_command_file.sh''' __UpperCAmelCase = '''Running gcloud compute tpus tpu-vm ssh''' def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Union[str, Any] = run_command( self.cmd + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> str: _lowerCamelCase : int = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[int] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone 
us-central1-a --command {self.base_output}; ls --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--command""", """echo \"Hello World\"""", """--debug""", ] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : Union[str, Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command_file""", self.command_file, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : List[str] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--accelerate_version""", """12.0.0""", """--debug""", ] , return_stdout=SCREAMING_SNAKE_CASE , ) self.assertIn( F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , SCREAMING_SNAKE_CASE , )
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum number of perfect squares that sum to `number` (classic DP)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
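# Hedged usage sketch (not part of the original file; assumes the function
# name used in the reconstruction above): 12 = 4 + 4 + 4, so three squares
# suffice, and 25 = 5**2 needs only one.
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(25) == 1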
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # GIT batches `input_ids` as a list of `None` when no prompt is given; normalize that to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
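# Hedged usage sketch (the checkpoint name is an assumption for illustration;
# any vision-to-text checkpoint works, and torch plus Pillow must be installed):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   print(captioner("path/to/image.png"))   # [{'generated_text': '...'}]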
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy solution to the fractional knapsack problem.

    Returns the maximum attainable value and the fraction taken of each item.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Take items in decreasing value-per-weight order.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
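# Hedged worked example for the greedy routine above (appended for
# illustration): the total weight 0.9 + 0.7 + 0.5 + 0.3 + 0.1 = 2.5 fits in a
# capacity of 5, so every item is taken whole and the full value 25 is earned.
if __name__ == "__main__":
    assert fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5) == (25, [1, 1, 1, 1, 1])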
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
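# Hedged illustration of what the lazy-module pattern above buys (assumes an
# installed `transformers`): nothing under `models.ctrl` is actually imported
# until an attribute is first accessed.
#
#   from transformers.models import ctrl
#   config_cls = ctrl.CTRLConfig   # only now does configuration_ctrl get imported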
from __future__ import annotations

import math


class SegmentTree:
    """Max segment tree with lazy propagation over a 1-indexed array of `size` elements."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element of A[a..b], pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return max(A[a..b]), pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
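# For reference, the demo above should print (hedged; derived by hand from A):
#   7    -> max(A[4..6])  = max(7, 3, -5)
#   14   -> max(A[7..11]) = max(6, 11, -20, 9, 14)
#   15   -> max(A[7..12])
#   111  -> max(A[1..15]) after assigning 111 to A[1..3]
# followed by the full array with A[1..3] = 111 and A[7..8] = 235.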
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def _snake_case ( snake_case__ : dict , snake_case__ : str , snake_case__ : set , snake_case__ : set , snake_case__ : dict , snake_case__ : dict , snake_case__ : PriorityQueue , snake_case__ : dict , snake_case__ : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue A = cst_fwd.get(snake_case__ , np.inf ) A = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) A = new_cost_f A = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: A = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : dict , snake_case__ : dict ): A = -1 A = set() A = set() A = {source: 0} A = {destination: 0} A = {source: None} A = {destination: None} A = PriorityQueue() A = PriorityQueue() A = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): A , A = queue_forward.get() visited_forward.add(snake_case__ ) A , A = queue_backward.get() visited_backward.add(snake_case__ ) A = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) A = pass_and_relaxation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: A = shortest_distance return shortest_path_distance _lowercase = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } _lowercase = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of an arithmetic progression: S = n/2 * (2a + (n - 1) * d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))  # 1 + 2 + ... + 10 = 55.0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
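# Hedged cross-check of the closed form S = n/2 * (2a + (n - 1)d) against a
# direct summation of the same 100-term progression:
if __name__ == "__main__":
    assert sum_of_series(1, 10, 100) == sum(1 + 10 * i for i in range(100))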
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy E = 1/2 * m * v**2; the sign of the velocity is ignored."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
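# Worked example (appended for illustration): a 10 kg body moving at -10 m/s
# carries 0.5 * 10 * 10**2 = 500 J regardless of direction of travel.
if __name__ == "__main__":
    assert kinetic_energy(10, -10) == 500.0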
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
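# Hedged note on running this module locally (the path assumes the standard
# transformers test layout, which may differ by version):
#
#   python -m pytest tests/models/convbert/test_modeling_tf_convbert.py -q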
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def snake_case ( self ): '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase__ :List[str] = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.dummy_uncond_unet lowerCAmelCase__ :int = PNDMScheduler() lowerCAmelCase__ :Any = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = torch.manual_seed(0 ) lowerCAmelCase__ :List[str] = pndm(generator=__UpperCAmelCase , num_inference_steps=2_0 , output_type='numpy' ).images lowerCAmelCase__ :str = torch.manual_seed(0 ) lowerCAmelCase__ :Union[str, Any] = pndm(generator=__UpperCAmelCase , num_inference_steps=2_0 , output_type='numpy' , return_dict=__UpperCAmelCase )[0] lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ :List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = 'google/ddpm-cifar10-32' lowerCAmelCase__ :Optional[Any] = UNetaDModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ :Any = PNDMScheduler() lowerCAmelCase__ :Dict = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase__ :str = pndm(generator=__UpperCAmelCase , output_type='numpy' ).images lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase__ :int = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so comparisons ignore casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort (constants, classes, then functions)."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of `file`; `check_only` decides whether to report or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
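# Hedged usage note: in the diffusers repo this script is wired into the style
# checks, so it is usually run indirectly; direct invocations look like
#
#   python utils/custom_init_isort.py --check_only   # report only
#   python utils/custom_init_isort.py                # rewrite inits in place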
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
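# Quick sanity checks (appended for illustration): 6 = 1 + 2 + 3 and
# 28 = 1 + 2 + 4 + 7 + 14 are perfect, while 27 (proper divisors 1, 3, 9) is not.
if __name__ == "__main__":
    assert perfect(6) and perfect(28) and not perfect(27)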
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case ( A__ ): if isinstance(A__ ,collections.abc.Iterable ): return x return (x, x) @require_flax class UpperCamelCase_ : def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: pass def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float ) -> Dict: UpperCAmelCase_ : Tuple = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : List[str] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = {"vision_model": vision_model, "text_model": text_model} UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str=None , 
**lowerCAmelCase_ : Optional[int] ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = {"vision_model": vision_model, "text_model": text_model} UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = after_output[0] UpperCAmelCase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-3 ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = {"vision_model": vision_model, "text_model": text_model} UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model( input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : str = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : Dict = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : List[str] = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> str: pt_model.to(lowerCAmelCase_ ) pt_model.eval() # prepare inputs UpperCAmelCase_ : Dict = inputs_dict UpperCAmelCase_ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): UpperCAmelCase_ : int = pt_model(**lowerCAmelCase_ ).to_tuple() UpperCAmelCase_ : int = fx_model(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ ) 
UpperCAmelCase_ : Optional[Any] = fx_model_loaded(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_flax=lowerCAmelCase_ ) pt_model_loaded.to(lowerCAmelCase_ ) pt_model_loaded.eval() with torch.no_grad(): UpperCAmelCase_ : Tuple = pt_model_loaded(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase_ , pt_output_loaded.numpy() , 4e-2 ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ) -> Any: UpperCAmelCase_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = VisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = fx_state self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Tuple: UpperCAmelCase_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = VisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ : str = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: UpperCAmelCase_ : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase_ ) @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: UpperCAmelCase_ : int = self.prepare_config_and_inputs() UpperCAmelCase_ : int = config_inputs_dict.pop("vision_config" ) UpperCAmelCase_ : int = config_inputs_dict.pop("text_config" ) UpperCAmelCase_ : Optional[Any] = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) self.check_equivalence_flax_to_pt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: UpperCAmelCase_ , 
UpperCAmelCase_ : int = self.get_pretrained_model_and_inputs() UpperCAmelCase_ : List[Any] = model_a(**lowerCAmelCase_ ) UpperCAmelCase_ : int = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model_a(**lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = after_outputs[0] UpperCAmelCase_ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-5 ) @require_flax class UpperCamelCase_ (__A , unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , ) UpperCAmelCase_ : List[Any] = 13 UpperCAmelCase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase_ : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase_ : Optional[Any] = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FlaxViTModel(lowerCAmelCase_ ) UpperCAmelCase_ : Any = FlaxBertModel(lowerCAmelCase_ ) return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = FlaxViTModelTester(self ) UpperCAmelCase_ : Optional[int] = FlaxBertModelTester(self ) UpperCAmelCase_ : Any = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : Any = vision_config_and_inputs UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class UpperCamelCase_ (__A , unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , ) UpperCAmelCase_ : Any = 13 UpperCAmelCase_ : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase_ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase_ : int = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : int = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ) -> str: UpperCAmelCase_ : Tuple = FlaxCLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = FlaxBertModel(lowerCAmelCase_ ) 
return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: UpperCAmelCase_ : List[Any] = FlaxCLIPVisionModelTester(self ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModelTester(self ) UpperCAmelCase_ : List[str] = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : Any = vision_config_and_inputs UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class UpperCamelCase_ (unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 ) UpperCAmelCase_ : Tuple = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) UpperCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase_ : int = processor( text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : Optional[int] = model(**lowerCAmelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCAmelCase_ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase_ , atol=1e-3 ) )
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(mapping)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
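A sketch of how the converter above would be invoked; every path here is a placeholder, not a file that ships with the script.

# Hypothetical invocation (paths are placeholders):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json
# Direct-call equivalent:
convert_parlai_checkpoint("blenderbot-model.bin", "hf_blenderbot", "blenderbot-3b-config.json")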
"""simple docstring""" import requests __lowerCamelCase = '' # <-- Put your OpenWeatherMap appid here! __lowerCamelCase = 'https://api.openweathermap.org/data/2.5/' def a ( __UpperCAmelCase : str = "Chicago" , __UpperCAmelCase : str = APPID ) -> dict: return requests.get(URL_BASE + """weather""" , params=locals() ).json() def a ( __UpperCAmelCase : str = "Kolkata, India" , __UpperCAmelCase : str = APPID ) -> dict: return requests.get(URL_BASE + """forecast""" , params=locals() ).json() def a ( __UpperCAmelCase : float = 55.68 , __UpperCAmelCase : float = 12.57 , __UpperCAmelCase : str = APPID ) -> dict: return requests.get(URL_BASE + """onecall""" , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: __lowerCamelCase = input('Enter a location:').strip() if location: pprint(current_weather(location)) else: break
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication using the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as lists of coefficients
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]
        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
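A small worked example of the class above: multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x should give 4 + 13x + 22x^2 + 15x^3.

# Multiply (1 + 2x + 3x^2) * (4 + 5x) with the FFT class above.
fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
# Expected product coefficients: 4, 13, 22, 15
# (they come back as rounded complex numbers, e.g. (4+0j))
print(fft)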
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def a ( snake_case__: Dict , snake_case__: Any , snake_case__: Dict ): '''simple docstring''' lowercase_ = AutoConfig.from_pretrained(snake_case__ ) lowercase_ = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case__ ) lowercase_ = checkpoints.load_tax_checkpoint(snake_case__ ) lowercase_ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowercase_ = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowercase_ = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase_ = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowercase_ = F'''layers_{str(snake_case__ )}''' # Self-Attention lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowercase_ = flax_model.params['''encoder''']['''block'''][str(snake_case__ )]['''layer'''] lowercase_ = tax_attention_key lowercase_ = tax_attention_out lowercase_ = tax_attention_query lowercase_ = tax_attention_value lowercase_ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase_ = tax_global_layer_norm if split_mlp_wi: lowercase_ = tax_mlp_wi_a lowercase_ = tax_mlp_wi_a else: lowercase_ = tax_mlp_wi lowercase_ = tax_mlp_wo lowercase_ = tax_mlp_layer_norm lowercase_ = flax_model_encoder_layer_block # Only for layer 0: lowercase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowercase_ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowercase_ = tax_encoder_global_rel_embedding # Assigning lowercase_ = 
tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowercase_ = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowercase_ = F'''layers_{str(snake_case__ )}''' # Self-Attention lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowercase_ = tax_enc_dec_attention_module['''key''']['''kernel'''] lowercase_ = tax_enc_dec_attention_module['''out''']['''kernel'''] lowercase_ = tax_enc_dec_attention_module['''query''']['''kernel'''] lowercase_ = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowercase_ = flax_model.params['''decoder''']['''block'''][str(snake_case__ )]['''layer'''] lowercase_ = tax_attention_key lowercase_ = tax_attention_out lowercase_ = tax_attention_query lowercase_ = tax_attention_value lowercase_ = tax_pre_attention_layer_norm lowercase_ = tax_enc_dec_attention_key lowercase_ = tax_enc_dec_attention_out lowercase_ = tax_enc_dec_attention_query lowercase_ = tax_enc_dec_attention_value lowercase_ = tax_cross_layer_norm if split_mlp_wi: lowercase_ = tax_mlp_wi_a lowercase_ = tax_mlp_wi_a else: lowercase_ = tax_mlp_wi lowercase_ = tax_mlp_wo lowercase_ = txa_mlp_layer_norm lowercase_ = flax_model_decoder_layer_block # Decoder Normalization lowercase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowercase_ = txa_decoder_norm # Only for layer 0: lowercase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowercase_ = tax_decoder_rel_embedding # Token Embeddings lowercase_ = tax_model['''target''']['''token_embedder''']['''embedding'''] lowercase_ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowercase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(snake_case__ ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' 
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
    '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
args = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
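A sketch of calling the converter directly, using the name the script's own __main__ block calls; the checkpoint path is a placeholder and the config name merely illustrates a valid LongT5 checkpoint on the Hub.

# Hypothetical direct call (the T5X checkpoint path is a placeholder):
convert_tax_checkpoint_to_flax(
    "path/to/t5x_checkpoint",      # T5X checkpoint directory
    "google/long-t5-local-base",   # config name on the Hub
    "flax_dump",                   # output folder for the Flax weights
)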
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ): '''simple docstring''' __A : Union[str, Any] = parent __A : Tuple = batch_size __A : List[str] = image_size __A : Dict = patch_size __A : Optional[Any] = num_channels __A : Tuple = is_training __A : Dict = use_labels __A : List[Any] = hidden_size __A : Tuple = num_hidden_layers __A : int = num_attention_heads __A : Optional[int] = intermediate_size __A : Tuple = hidden_act __A : Any = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : List[Any] = type_sequence_label_size __A : List[Any] = initializer_range __A : Optional[int] = num_labels __A : List[Any] = scope __A : Any = n_targets __A : Union[str, Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __A : int = num_patches + 1 + self.num_detection_tokens def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) __A : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __A : List[Any] = [] for i in range(self.batch_size): __A : Optional[int] = {} __A : Union[str, Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase) __A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase) labels.append(_UpperCAmelCase) __A : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase): '''simple docstring''' __A : Any = YolosModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = YolosForObjectDetection(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : str = model(pixel_values=_UpperCAmelCase) __A : List[str] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) __A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.prepare_config_and_inputs() __A ,__A ,__A : Tuple = config_and_inputs __A : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' __A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __A : Any = [] for i in range(self.model_tester.batch_size): __A : Tuple = {} __A : Tuple = torch.ones( size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long) __A : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float) labels.append(_UpperCAmelCase) __A : str = labels return inputs_dict def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = YolosModelTester(self) __A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Tuple = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[Any] = model_class(_UpperCAmelCase) __A : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : int = 
[*signature.parameters.keys()] __A : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Optional[int] = True # in YOLOS, the seq_len is different __A : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __A : Dict = True __A : Dict = False __A : Union[str, Any] = True __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __A : List[Any] = True __A : List[str] = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __A : str = len(_UpperCAmelCase) # Check attention is always last and order is fine __A : Dict = True __A : Dict = True __A : Dict = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.hidden_states __A : List[str] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) # YOLOS has a different seq_length __A : Dict = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[str] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase) @slow def 
SCREAMING_SNAKE_CASE ( self): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase) self.assertIsNotNone(_UpperCAmelCase) def _lowerCAmelCase ( ) -> int: __A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase) __A : Any = self.default_image_processor __A : str = prepare_img() __A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase) # forward pass with torch.no_grad(): __A : str = model(inputs.pixel_values) # verify outputs __A : Tuple = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape , _UpperCAmelCase) __A : Dict = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , ) __A : int = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) # verify postprocessing __A : List[str] = image_processor.post_process_object_detection( _UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0] __A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase) __A : Union[str, Any] = [75, 75, 17, 63, 17] __A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase) self.assertEqual(len(results['scores']) , 5) self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4)) self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase) self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
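Outside the test harness, the same checkpoint the integration test above uses can drive plain object-detection inference; a sketch, with the image path as a placeholder.

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("path/to/image.png")  # placeholder path
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits/boxes into thresholded detections in image coordinates.
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())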
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename lowercase__ : str = 'http://www.mocksite.com/file1.txt' lowercase__ : Union[str, Any] = '"text": ["foo", "foo"]' lowercase__ : str = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class __lowerCAmelCase : """simple docstring""" _snake_case : str = 2_0_0 _snake_case : Any = {'Content-Length': '100'} _snake_case : Dict = {} def snake_case__ ( self : Tuple , **lowerCAmelCase__ : str ) -> Tuple: '''simple docstring''' return [bytes(lowerCAmelCase__ , '''utf-8''' )] def a__ ( *lowercase : List[str], **lowercase : List[Any] ) -> Tuple: """simple docstring""" return MockResponse() @pytest.mark.parametrize('''urls_type''', [str, list, dict] ) def a__ ( lowercase : Dict, lowercase : str, lowercase : Any ) -> str: """simple docstring""" import requests monkeypatch.setattr(lowercase, '''request''', lowercase ) _UpperCamelCase = URL if issubclass(lowercase, lowercase ): _UpperCamelCase = url elif issubclass(lowercase, lowercase ): _UpperCamelCase = [url] elif issubclass(lowercase, lowercase ): _UpperCamelCase = {'''train''': url} _UpperCamelCase = '''dummy''' _UpperCamelCase = '''downloads''' _UpperCamelCase = tmp_path _UpperCamelCase = DownloadConfig( cache_dir=os.path.join(lowercase, lowercase ), use_etag=lowercase, ) _UpperCamelCase = DownloadManager(dataset_name=lowercase, download_config=lowercase ) _UpperCamelCase = dl_manager.download(lowercase ) _UpperCamelCase = urls for downloaded_paths in [downloaded_paths]: if isinstance(lowercase, lowercase ): _UpperCamelCase = [downloaded_paths] _UpperCamelCase = [urls] elif isinstance(lowercase, lowercase ): assert "train" in downloaded_paths.keys() _UpperCamelCase = downloaded_paths.values() _UpperCamelCase = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(lowercase, lowercase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _UpperCamelCase = Path(lowercase ) _UpperCamelCase = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _UpperCamelCase = downloaded_path.read_text() assert content == CONTENT _UpperCamelCase = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() _UpperCamelCase = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''', [str, list, dict] ) def a__ ( lowercase : int, lowercase : Union[str, Any], lowercase : List[Any] ) -> Any: """simple docstring""" _UpperCamelCase = str(lowercase ) if issubclass(lowercase, lowercase ): _UpperCamelCase = filename elif issubclass(lowercase, lowercase ): _UpperCamelCase = [filename] elif issubclass(lowercase, lowercase ): _UpperCamelCase = {'''train''': filename} _UpperCamelCase = '''dummy''' _UpperCamelCase = xz_file.parent _UpperCamelCase = '''extracted''' _UpperCamelCase = DownloadConfig( cache_dir=lowercase, use_etag=lowercase, ) _UpperCamelCase = DownloadManager(dataset_name=lowercase, download_config=lowercase ) _UpperCamelCase = dl_manager.extract(lowercase ) _UpperCamelCase = paths for extracted_paths in [extracted_paths]: if isinstance(lowercase, lowercase ): _UpperCamelCase = [extracted_paths] _UpperCamelCase = [paths] elif isinstance(lowercase, lowercase ): assert "train" in 
extracted_paths.keys() _UpperCamelCase = extracted_paths.values() _UpperCamelCase = paths.values() assert extracted_paths for extracted_path, input_path in zip(lowercase, lowercase ): assert extracted_path == dl_manager.extracted_paths[input_path] _UpperCamelCase = Path(lowercase ) _UpperCamelCase = extracted_path.parts assert parts[-1] == hash_url_to_filename(lowercase, etag=lowercase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _UpperCamelCase = extracted_path.read_text() _UpperCamelCase = text_file.read_text() assert extracted_file_content == expected_file_content def a__ ( lowercase : Tuple, lowercase : Tuple ) -> Tuple: """simple docstring""" assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(lowercase, start=1 ): _UpperCamelCase = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def a__ ( lowercase : List[Any], lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" _UpperCamelCase = request.getfixturevalue(lowercase ) _UpperCamelCase = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase ), start=1 ): _test_jsonl(lowercase, lowercase ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def a__ ( lowercase : List[Any], lowercase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = request.getfixturevalue(lowercase ) _UpperCamelCase = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase ), start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase ), start=1 ): _test_jsonl(lowercase, lowercase ) assert num_tar == 1 assert num_jsonl == 2 def a__ ( lowercase : Tuple ) -> Tuple: """simple docstring""" _UpperCamelCase = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(lowercase ), start=1 ): assert os.path.basename(lowercase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
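The API the tests above exercise looks roughly like this in normal use; the URL and archive path below are placeholders.

from datasets import DownloadManager

dl_manager = DownloadManager()
# download() caches files under a hash of their URL; extract() unpacks archives
# into an "extracted" cache subdirectory.
local_path = dl_manager.download("https://example.com/file1.txt")  # placeholder URL
# iter_archive() yields (path_inside_archive, file_object) pairs.
for inner_path, f in dl_manager.iter_archive("path/to/archive.tar"):  # placeholder path
    print(inner_path, f.readline())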
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowercase__ : Optional[int] = None lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : List[str] = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } lowercase__ : Dict = { '''camembert-base''': 5_12, } lowercase__ : str = '''▁''' class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] lowerCAmelCase = CamembertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ): '''simple docstring''' __A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) __A : List[str] = vocab_file __A : Optional[int] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A : Optional[Any] = [self.cls_token_id] __A : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' __A : Optional[int] = [self.sep_token_id] __A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(_UpperCAmelCase): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __A : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase): copyfile(self.vocab_file , _UpperCAmelCase) return (out_vocab_file,)
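The class defined above (named `SCREAMING_SNAKE_CASE` in this sample) corresponds to CamembertTokenizerFast; a brief usage sketch with the checkpoint its own pretrained-vocab map points at.

from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
# IDs are wrapped as <s> ... </s>, per build_inputs_with_special_tokens above.
print(encoded["input_ids"])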
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SCREAMING_SNAKE_CASE = get_tests_dir('fixtures') SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy_feature_extractor_config.json') SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy-config.json') class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self ): __a = 0 def snake_case_ ( self ): __a = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(__A , __A ) def snake_case_ ( self ): __a = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def snake_case_ ( self ): with tempfile.TemporaryDirectory() as tmpdirname: __a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally __a = AutoFeatureExtractor.from_pretrained(__A ).to_dict() config_dict.pop("""feature_extractor_type""" ) __a = WavaVecaFeatureExtractor(**__A ) # save in new folder model_config.save_pretrained(__A ) config.save_pretrained(__A ) __a = AutoFeatureExtractor.from_pretrained(__A ) # make sure private variable is not incorrectly saved __a = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(__A , __A ) def snake_case_ ( self ): __a = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def snake_case_ ( self ): with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): __a = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def snake_case_ ( self ): with self.assertRaisesRegex( __A , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): __a = AutoFeatureExtractor.from_pretrained(__A , revision="""aaaaaa""" ) def snake_case_ ( self ): with self.assertRaisesRegex( __A , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): __a = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def snake_case_ ( self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A ) __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__A ) __a = AutoFeatureExtractor.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def snake_case_ ( self ): try: AutoConfig.register("""custom""" , __A ) AutoFeatureExtractor.register(__A , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoFeatureExtractor.register(__A , __A ) # Now that the config is registered, it can be used as any other config with the auto-API __a = CustomFeatureExtractor.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__A ) __a = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def snake_case_ ( self ): class __UpperCAmelCase ( __A ): """simple docstring""" _lowerCamelCase = True try: AutoConfig.register("""custom""" , __A ) AutoFeatureExtractor.register(__A , __A ) # If remote code is not set, the default is to use local __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub __a = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(__A , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
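The registration flow those tests cover, reduced to a sketch; `MyConfig` and `MyFeatureExtractor` are hypothetical classes invented for illustration.

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


# After registration, the auto classes resolve "my-model" configs to the
# custom feature extractor, just like built-in model types.
AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)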
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase__ : Any = '''hf-internal-testing/tiny-random-bert''' lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase))) with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Any = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) self.assertTrue(os.path.isfile(_UpperCAmelCase)) # File is cached at the same place the second time. __A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase) # Using a specific revision to test the full commit hash. __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223') self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): __A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase) with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): __A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa') with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : int = cached_file(_UpperCAmelCase , 'conf') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : Any = cached_file(_UpperCAmelCase , 'conf') with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf'))) __A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : List[str] = mock.Mock() __A : Dict = 500 __A : List[str] = {} __A : List[Any] = HTTPError __A : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head: __A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt')) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): get_file_from_repo('bert-base-case' , _UpperCAmelCase) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha') __A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase) # The name is the cached name which is not very easy to test, so instead we load the content. __A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read()) self.assertEqual(config['hidden_size'] , 768) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __A : Tuple = Path(_UpperCAmelCase) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase)) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
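The two helpers those tests cover differ mainly in failure behavior; a sketch using the same tiny repo the tests pull from.

from transformers.utils import cached_file, get_file_from_repo

# cached_file raises on missing repos/files (unless the _raise_exceptions_*
# flags are disabled); get_file_from_repo returns None instead.
config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
maybe_path = get_file_from_repo("hf-internal-testing/tiny-random-bert", "missing.txt")
print(config_path, maybe_path)  # local cache path, None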
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __snake_case : '''simple docstring''' def __init__( self , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = 13 SCREAMING_SNAKE_CASE__ = 7 SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 99 SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 32 SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = 0.1 SCREAMING_SNAKE_CASE__ = 0.1 SCREAMING_SNAKE_CASE__ = 5_12 SCREAMING_SNAKE_CASE__ = 16 SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = 0.02 SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = 4 SCREAMING_SNAKE_CASE__ = '''last''' SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = 0 def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' 
SCREAMING_SNAKE_CASE__ = TFFlaubertModel(config=A_ ) SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} SCREAMING_SNAKE_CASE__ = model(A_ ) SCREAMING_SNAKE_CASE__ = [input_ids, input_mask] SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TFFlaubertWithLMHeadModel(A_ ) SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TFFlaubertForQuestionAnsweringSimple(A_ ) SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths} SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TFFlaubertForSequenceClassification(A_ ) SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths} SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = TFFlaubertForTokenClassification(config=A_ ) SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = TFFlaubertForMultipleChoice(config=A_ ) SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''langs''': token_type_ids, '''lengths''': input_lengths, } return config, inputs_dict @require_tf class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple 
docstring''' lowerCamelCase__ : Optional[int] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowerCamelCase__ : Union[str, Any] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCamelCase__ : Optional[int] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : Optional[Any] = False def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TFFlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , emb_dim=37 ) def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ ) @slow def lowercase_ ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_tf @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' ) SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" SCREAMING_SNAKE_CASE__ = model(A_ )[0] SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , A_ ) # compare the actual values for a slice. 
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
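# The integration test above in plain usage form (the checkpoint name, input ids and
# expected output shape are taken directly from the test; dtype fixed to tf.int32):
import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
output = model(input_ids)[0]
print(output.shape)  # (1, 8, 512) -- (batch, sequence length, hidden size)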
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a seq2seq model from its config alone (random weights) and save it with its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)  # no pretrained weights are loaded
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
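# Usage sketch (the module filename and model id below are just examples): fire maps
# positional CLI arguments onto the function parameters, and any extra --flags become
# **config_kwargs overrides forwarded to AutoConfig.
#
#   python save_randomly_initialized.py t5-small ./t5-small-random
#
# or, equivalently, from Python:
#
#   model = save_randomly_initialized_version("t5-small", "./t5-small-random")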
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def a__ ( A__=None, A__=None ): return field(default_factory=lambda: default, metadata=A__ ) @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase = field( metadata={"""help""": """The csv file to plot."""} , ) _UpperCAmelCase = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) _UpperCAmelCase = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) _UpperCAmelCase = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) _UpperCAmelCase = field( default=__SCREAMING_SNAKE_CASE , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) _UpperCAmelCase = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , ) _UpperCAmelCase = list_field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def a__ ( A__ ): try: int(A__ ) return True except ValueError: return False def a__ ( A__ ): try: float(A__ ) return True except ValueError: return False class __lowercase : """simple docstring""" def __init__( self , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = args SCREAMING_SNAKE_CASE_ : int = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: SCREAMING_SNAKE_CASE_ : Optional[Any] = csv.DictReader(lowerCAmelCase__ ) for row in reader: SCREAMING_SNAKE_CASE_ : Optional[int] = row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None SCREAMING_SNAKE_CASE_ : int = int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None SCREAMING_SNAKE_CASE_ : int = float(row['result'] ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = plt.subplots() SCREAMING_SNAKE_CASE_ : List[Any] = 'Time usage' if self.args.is_time else 'Memory usage' SCREAMING_SNAKE_CASE_ : Dict = title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): SCREAMING_SNAKE_CASE_ : List[Any] = sorted(set(self.result_dict[model_name]['bsz'] ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = sorted(set(self.result_dict[model_name]['seq_len'] ) ) SCREAMING_SNAKE_CASE_ : List[Any] = self.result_dict[model_name]['result'] ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Optional[Any] = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) SCREAMING_SNAKE_CASE_ : Dict = ( model_name if self.args.short_model_names is None else 
self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: SCREAMING_SNAKE_CASE_ : Any = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase__ , ) else: SCREAMING_SNAKE_CASE_ : int = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Tuple = ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) SCREAMING_SNAKE_CASE_ : str = np.asarray(lowerCAmelCase__ , lowerCAmelCase__ )[: len(lowerCAmelCase__ )] plt.scatter( lowerCAmelCase__ , lowerCAmelCase__ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(lowerCAmelCase__ , lowerCAmelCase__ , '--' ) title_str += F''' {label_model_name} vs.''' SCREAMING_SNAKE_CASE_ : Any = title_str[:-4] SCREAMING_SNAKE_CASE_ : List[str] = 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase__ ) plt.xlabel(lowerCAmelCase__ ) plt.ylabel(lowerCAmelCase__ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def a__ ( ): SCREAMING_SNAKE_CASE_ : Any = HfArgumentParser(A__ ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args_into_dataclasses()[0] SCREAMING_SNAKE_CASE_ : Optional[int] = Plot(args=A__ ) plot.plot() if __name__ == "__main__": main()
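# The Plot class above expects a CSV with the columns `model`, `batch_size`,
# `sequence_length`, and `result` (the layout emitted by the transformers benchmark
# utilities). A minimal hand-made file, plus an invocation assuming HfArgumentParser's
# default flag naming, would look like:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1254
#   bert-base-uncased,8,512,3432
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png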
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase__ : Any = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''tapas''' def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __A : Dict = vocab_size __A : Tuple = hidden_size __A : Any = num_hidden_layers __A : int = num_attention_heads __A : Tuple = hidden_act __A : Tuple = intermediate_size __A : List[Any] = hidden_dropout_prob __A : int = attention_probs_dropout_prob __A : List[str] = max_position_embeddings __A : Optional[int] = type_vocab_sizes __A : str = initializer_range __A : List[str] = layer_norm_eps # Fine-tuning task hyperparameters __A : List[str] = positive_label_weight __A : List[Any] = num_aggregation_labels __A : Optional[Any] = aggregation_loss_weight __A : Tuple = use_answer_as_supervision __A : List[str] = answer_loss_importance __A : Any = use_normalized_answer_loss __A : Any = huber_loss_delta __A : Union[str, Any] = temperature __A : Tuple = aggregation_temperature __A : Optional[Any] = use_gumbel_for_cells __A : List[str] = use_gumbel_for_aggregation __A : Tuple = average_approximation_function __A : List[str] = cell_selection_preference __A : Dict = answer_loss_cutoff __A : Union[str, Any] = max_num_rows __A : Optional[Any] = max_num_columns __A : int = average_logits_per_cell __A : Optional[Any] = select_one_column __A : int = allow_empty_column_selection __A : List[Any] = init_cell_selection_weights_to_zero __A : int = reset_position_index_per_cell __A : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters __A : Optional[Any] = aggregation_labels __A : List[str] = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCAmelCase): __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ : str = logging.get_logger(__name__) __magic_name__ : Union[str, Any] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Any = """rwkv""" __lowerCAmelCase : List[str] = {"""max_position_embeddings""": """context_length"""} def __init__( self , _A=5_0_2_7_7 , _A=1_0_2_4 , _A=4_0_9_6 , _A=3_2 , _A=None , _A=None , _A=1e-5 , _A=0 , _A=0 , _A=6 , _A=False , _A=True , **_A , ): '''simple docstring''' UpperCamelCase : Any = vocab_size UpperCamelCase : Optional[Any] = context_length UpperCamelCase : str = hidden_size UpperCamelCase : int = num_hidden_layers UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCamelCase : Any = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCamelCase : Any = layer_norm_epsilon UpperCamelCase : Optional[int] = rescale_every UpperCamelCase : Optional[int] = use_cache UpperCamelCase : Union[str, Any] = bos_token_id UpperCamelCase : Any = eos_token_id super().__init__( tie_word_embeddings=_A , bos_token_id=_A , eos_token_id=_A , **_A )
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize): '''simple docstring''' __A : Union[str, Any] = 'bilinear' __A : int = max_size __A : Optional[Any] = short_edge_length def __call__( self , _UpperCAmelCase): '''simple docstring''' __A : int = [] for img in imgs: __A ,__A : Dict = img.shape[:2] # later: provide list and randomly choose index for resize __A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img __A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase) if h < w: __A ,__A : Optional[Any] = size, scale * w else: __A ,__A : Optional[Any] = scale * h, size if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size: __A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase) __A : Tuple = newh * scale __A : Dict = neww * scale __A : Dict = int(neww + 0.5) __A : Optional[int] = int(newh + 0.5) if img.dtype == np.uinta: __A : int = Image.fromarray(_UpperCAmelCase) __A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) __A : Dict = np.asarray(_UpperCAmelCase) else: __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw __A : Dict = nn.functional.interpolate( _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0) img_augs.append(_UpperCAmelCase) return img_augs class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) __A : List[Any] = cfg.INPUT.FORMAT __A : Dict = cfg.SIZE_DIVISIBILITY __A : str = cfg.PAD_VALUE __A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST __A : int = cfg.MODEL.DEVICE __A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images])) __A : Dict = [im.shape[-2:] for im in images] __A : Optional[int] = [ nn.functional.pad( _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_UpperCAmelCase , _UpperCAmelCase) ] return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase) def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' with torch.no_grad(): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : int = [images] if single_image: assert len(_UpperCAmelCase) == 1 for i in range(len(_UpperCAmelCase)): if isinstance(images[i] , torch.Tensor): images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge __A : str = torch.tensor([im.shape[:2] for im in images]) __A : List[str] = self.aug(_UpperCAmelCase) # transpose images and convert to 
torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __A : Any = [self.normalizer(_UpperCAmelCase) for x in images] # now pad them to do the following operations __A ,__A : Any = self.pad(_UpperCAmelCase) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int: assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!" __A ,__A : int = box_size tensor[:, 0].clamp_(min=0 , max=__snake_case ) tensor[:, 1].clamp_(min=0 , max=__snake_case ) tensor[:, 2].clamp_(min=0 , max=__snake_case ) tensor[:, 3].clamp_(min=0 , max=__snake_case )
"""simple docstring""" from __future__ import annotations import time snake_case = list[tuple[int, int]] snake_case = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCAmelCase : def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Node | None ): """simple docstring""" _snake_case = pos_x _snake_case = pos_y _snake_case = (pos_y, pos_x) _snake_case = goal_x _snake_case = goal_y _snake_case = parent class UpperCAmelCase : def __init__( self : List[str] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : tuple[int, int] ): """simple docstring""" _snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCamelCase ) _snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCamelCase ) _snake_case = [self.start] _snake_case = False def __UpperCAmelCase ( self : int ): """simple docstring""" while self.node_queue: _snake_case = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: _snake_case = True return self.retrace_path(__lowerCamelCase ) _snake_case = self.get_successors(__lowerCamelCase ) for node in successors: self.node_queue.append(__lowerCamelCase ) if not self.reached: return [self.start.pos] return None def __UpperCAmelCase ( self : str , __lowerCamelCase : Node ): """simple docstring""" _snake_case = [] for action in delta: _snake_case = parent.pos_x + action[1] _snake_case = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , __lowerCamelCase ) ) return successors def __UpperCAmelCase ( self : str , __lowerCamelCase : Node | None ): """simple docstring""" _snake_case = node _snake_case = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _snake_case = current_node.parent path.reverse() return path class UpperCAmelCase : def __init__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" _snake_case = BreadthFirstSearch(__lowerCamelCase , __lowerCamelCase ) _snake_case = BreadthFirstSearch(__lowerCamelCase , __lowerCamelCase ) _snake_case = False def __UpperCAmelCase ( self : List[str] ): """simple docstring""" while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: _snake_case = self.fwd_bfs.node_queue.pop(0 ) _snake_case = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: _snake_case = True return self.retrace_bidirectional_path( __lowerCamelCase , __lowerCamelCase ) _snake_case = current_bwd_node _snake_case = current_fwd_node _snake_case = { self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCamelCase ), self.bwd_bfs: self.bwd_bfs.get_successors(__lowerCamelCase ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__lowerCamelCase ) if not self.reached: return [self.fwd_bfs.start.pos] return None def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Node , __lowerCamelCase : Node ): """simple docstring""" _snake_case = self.fwd_bfs.retrace_path(__lowerCamelCase ) _snake_case = self.bwd_bfs.retrace_path(__lowerCamelCase ) bwd_path.pop() 
bwd_path.reverse() _snake_case = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() snake_case = (0, 0) snake_case = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) snake_case = time.time() snake_case = BreadthFirstSearch(init, goal) snake_case = bfs.search() snake_case = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) snake_case = time.time() snake_case = BidirectionalBreadthFirstSearch(init, goal) snake_case = bd_bfs.search() snake_case = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
def compute_ap(l):  # noqa: E741
    """Print all articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
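# `low[v]` above is a Tarjan-style low-link value: the smallest vertex id reachable
# from v's DFS subtree using at most one back edge. A non-root vertex `at` is an
# articulation point when some child `to` cannot climb above it (at <= low[to]);
# the root is one only if its DFS tree has more than one child, which is what the
# `out_edge_count > 1` check enforces. Sanity check on a path graph, where the
# middle vertex must be reported:
compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints: 1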
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } UpperCamelCase = { """google/electra-small-generator""": 512, """google/electra-base-generator""": 512, """google/electra-large-generator""": 512, """google/electra-small-discriminator""": 512, """google/electra-base-discriminator""": 512, """google/electra-large-discriminator""": 512, } UpperCamelCase = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Dict = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = PRETRAINED_INIT_CONFIGURATION A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[str] = ElectraTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="[UNK]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[PAD]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> str: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , 
pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get("strip_accents" , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): A__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop("type" ) ) A__ = do_lower_case A__ = strip_accents A__ = tokenize_chinese_chars A__ = normalizer_class(**SCREAMING_SNAKE_CASE__ ) A__ = do_lower_case def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> List[str]: A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]: A__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
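# Standard fast-tokenizer usage for the class above (checkpoint name taken from the
# pretrained map; single sequences get wrapped as [CLS] ... [SEP] by
# build_inputs_with_special_tokens):
from transformers import ElectraTokenizerFast

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
ids = tokenizer("hello world")["input_ids"]
assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id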
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } lowercase__ : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]: for attribute in key.split('.' ): __A : int = getattr(__snake_case , __snake_case ) if weight_type is not None: __A : Optional[int] = getattr(__snake_case , __snake_case ).shape else: __A : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __A : Tuple = value elif weight_type == "weight_g": __A : Union[str, Any] = value elif weight_type == "weight_v": __A : Optional[Any] = value elif weight_type == "bias": __A : Optional[int] = value else: __A : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]: __A : Optional[Any] = [] __A : Any = fairseq_model.state_dict() __A : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __A : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , ) __A : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __A : int = True if "*" in mapped_key: __A : Any = name.split(__snake_case )[0].split('.' )[-2] __A : List[Any] = mapped_key.replace('*' , __snake_case ) if "weight_g" in name: __A : Optional[Any] = 'weight_g' elif "weight_v" in name: __A : Union[str, Any] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __A : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __A : Tuple = 'weight' else: __A : Dict = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int: __A : int = full_name.split('conv_layers.' )[-1] __A : List[str] = name.split('.' ) __A : Optional[int] = int(items[0] ) __A : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __A : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __A : Union[str, Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __A : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __A : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) @torch.no_grad() def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any: # load the pre-trained checkpoints __A : List[str] = torch.load(__snake_case ) __A : Dict = WavLMConfigOrig(checkpoint['cfg'] ) __A : Optional[int] = WavLMOrig(__snake_case ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __A : List[Any] = WavLMConfig.from_pretrained(__snake_case ) else: __A : Dict = WavLMConfig() __A : Optional[Any] = WavLMModel(__snake_case ) recursively_load_weights(__snake_case , __snake_case ) hf_wavlm.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowercase__ : Any = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
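# Typical invocation of the conversion script above (paths are placeholders and the
# script filename is assumed from the usual repo naming convention):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted \
#       --config_path ./config.json   # optional; omitted -> a default WavLMConfig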
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : str = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class lowerCAmelCase_ ( lowerCamelCase_ ): __a : Union[str, Any] = "deta" __a : Optional[int] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self ,snake_case__=None ,snake_case__=900 ,snake_case__=2048 ,snake_case__=6 ,snake_case__=2048 ,snake_case__=8 ,snake_case__=6 ,snake_case__=1024 ,snake_case__=8 ,snake_case__=0.0 ,snake_case__=True ,snake_case__="relu" ,snake_case__=256 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=0.0 ,snake_case__=0.02 ,snake_case__=1.0 ,snake_case__=True ,snake_case__=False ,snake_case__="sine" ,snake_case__=5 ,snake_case__=4 ,snake_case__=4 ,snake_case__=True ,snake_case__=300 ,snake_case__=True ,snake_case__=True ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=1 ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=0.1 ,snake_case__=0.25 ,**snake_case__ ,): if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) SCREAMING_SNAKE_CASE_ : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(snake_case__ ,snake_case__ ): SCREAMING_SNAKE_CASE_ : List[str] = backbone_config.pop('model_type' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_ : Optional[Any] = config_class.from_dict(snake_case__ ) SCREAMING_SNAKE_CASE_ : str = backbone_config SCREAMING_SNAKE_CASE_ : Optional[int] = num_queries SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[int] = d_model SCREAMING_SNAKE_CASE_ : List[Any] = encoder_ffn_dim SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_layers SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_attention_heads SCREAMING_SNAKE_CASE_ : List[str] = decoder_ffn_dim SCREAMING_SNAKE_CASE_ : List[str] = decoder_layers SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_attention_heads SCREAMING_SNAKE_CASE_ : Optional[Any] = dropout SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE_ : int = activation_dropout SCREAMING_SNAKE_CASE_ : Any = activation_function SCREAMING_SNAKE_CASE_ : Any = init_std SCREAMING_SNAKE_CASE_ : int = init_xavier_std SCREAMING_SNAKE_CASE_ : Tuple = encoder_layerdrop SCREAMING_SNAKE_CASE_ : Tuple = auxiliary_loss SCREAMING_SNAKE_CASE_ : str = position_embedding_type # deformable attributes SCREAMING_SNAKE_CASE_ : Optional[Any] = num_feature_levels SCREAMING_SNAKE_CASE_ : Any = encoder_n_points SCREAMING_SNAKE_CASE_ : Dict = decoder_n_points SCREAMING_SNAKE_CASE_ : Optional[int] = two_stage SCREAMING_SNAKE_CASE_ : str = two_stage_num_proposals SCREAMING_SNAKE_CASE_ : List[Any] = with_box_refine SCREAMING_SNAKE_CASE_ : Optional[Any] = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' 
) # Hungarian matcher SCREAMING_SNAKE_CASE_ : List[Any] = class_cost SCREAMING_SNAKE_CASE_ : Optional[Any] = bbox_cost SCREAMING_SNAKE_CASE_ : Any = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE_ : List[Any] = mask_loss_coefficient SCREAMING_SNAKE_CASE_ : str = dice_loss_coefficient SCREAMING_SNAKE_CASE_ : List[Any] = bbox_loss_coefficient SCREAMING_SNAKE_CASE_ : str = giou_loss_coefficient SCREAMING_SNAKE_CASE_ : Union[str, Any] = eos_coefficient SCREAMING_SNAKE_CASE_ : Tuple = focal_alpha super().__init__(is_encoder_decoder=snake_case__ ,**snake_case__ ) @property def snake_case ( self ): return self.encoder_attention_heads @property def snake_case ( self ): return self.d_model def snake_case ( self ): SCREAMING_SNAKE_CASE_ : Optional[Any] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE_ : Any = self.__class__.model_type return output
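# Assuming the class above is transformers' DetaConfig (the dump mangled the class
# name), a minimal instantiation; note the guard above that makes two_stage=True
# require with_box_refine=True, and that num_attention_heads is an attribute_map
# alias for encoder_attention_heads:
from transformers import DetaConfig

cfg = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
assert cfg.num_attention_heads == cfg.encoder_attention_heads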
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __A : Dict = sample_size # time if time_embedding_type == "fourier": __A : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase) __A : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": __A : List[str] = Timesteps( block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase) __A : List[str] = block_out_channels[0] if use_timestep_embedding: __A : Optional[Any] = block_out_channels[0] * 4 __A : Optional[int] = TimestepEmbedding( in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , ) __A : Dict = nn.ModuleList([]) __A : Dict = None __A : Tuple = nn.ModuleList([]) __A : Tuple = None # down __A : Any = in_channels for i, down_block_type in enumerate(_UpperCAmelCase): __A : Tuple = output_channel __A : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __A : List[str] = i == len(_UpperCAmelCase) - 1 __A : int = get_down_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_UpperCAmelCase) # mid __A : str = get_mid_block( _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , ) # up __A : Optional[int] = list(reversed(_UpperCAmelCase)) __A : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: __A : str = out_channels else: __A : List[Any] = block_out_channels[0] for i, up_block_type in enumerate(_UpperCAmelCase): __A : Optional[Any] = output_channel __A : Optional[Any] = ( reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels ) __A : Dict = i == len(_UpperCAmelCase) - 1 __A : str = get_up_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_UpperCAmelCase) __A : Optional[int] = output_channel # out __A : str = 
norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) __A : Optional[Any] = get_out_block( out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): '''simple docstring''' __A : Any = timestep if not torch.is_tensor(_UpperCAmelCase): __A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0: __A : Any = timesteps[None].to(sample.device) __A : List[Any] = self.time_proj(_UpperCAmelCase) if self.config.use_timestep_embedding: __A : Dict = self.time_mlp(_UpperCAmelCase) else: __A : Dict = timestep_embed[..., None] __A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) __A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down __A : int = () for downsample_block in self.down_blocks: __A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase) down_block_res_samples += res_samples # 3. mid if self.mid_block: __A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase) # 4. up for i, upsample_block in enumerate(self.up_blocks): __A : Any = down_block_res_samples[-1:] __A : Optional[int] = down_block_res_samples[:-1] __A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase) # 5. post-process if self.out_block: __A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase) if not return_dict: return (sample,) return UNetaDOutput(sample=_UpperCAmelCase)
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __snake_case :List[str] ={'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class lowerCAmelCase__ ( unittest.TestCase ): A_ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: A_ : Dict = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: A_ : int = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict ) -> Any: A = ZeroShotClassificationPipeline( model=__UpperCamelCase , tokenizer=__UpperCamelCase , candidate_labels=['polics', 'health'] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] ) -> int: A = classifier('Who are you voting for in 2020?' , candidate_labels='politics' ) self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} ) # No kwarg A = classifier('Who are you voting for in 2020?' , ['politics'] ) self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} ) A = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] ) self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} ) A = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' ) self.assertEqual( __UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 ) A = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] ) self.assertEqual( __UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 ) A = classifier( 'Who are you voting for in 2020?' 
, candidate_labels='politics' , hypothesis_template='This text is about {}' ) self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} ) # https://github.com/huggingface/transformers/issues/13846 A = classifier(['I am happy'] , ['positive', 'negative'] ) self.assertEqual( __UpperCamelCase , [ {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} for i in range(1 ) ] , ) A = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] ) self.assertEqual( __UpperCamelCase , [ {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} for i in range(2 ) ] , ) with self.assertRaises(__UpperCamelCase ): classifier('' , candidate_labels='politics' ) with self.assertRaises(__UpperCamelCase ): classifier(__UpperCamelCase , candidate_labels='politics' ) with self.assertRaises(__UpperCamelCase ): classifier('Who are you voting for in 2020?' , candidate_labels='' ) with self.assertRaises(__UpperCamelCase ): classifier('Who are you voting for in 2020?' , candidate_labels=__UpperCamelCase ) with self.assertRaises(__UpperCamelCase ): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , ) with self.assertRaises(__UpperCamelCase ): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=__UpperCamelCase , ) self.run_entailment_id(__UpperCamelCase ) def __UpperCamelCase ( self : int , __UpperCamelCase : Pipeline ) -> Any: A = zero_shot_classifier.model.config A = config.labelaid A = zero_shot_classifier.entailment_id A = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) A = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) A = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) A = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) A = original_labelaid self.assertEqual(__UpperCamelCase , zero_shot_classifier.entailment_id ) @require_torch def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: A = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] ) @require_torch def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: A = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) A = zero_shot_classifier( 'Who are you voting for in 2020?' 
, candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def __UpperCamelCase ( self : int ) -> Dict: A = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , ) A = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: A = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' ) A = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) A = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__UpperCamelCase , ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def __UpperCamelCase ( self : List[str] ) -> Any: A = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' ) A = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) A = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__UpperCamelCase , ) self.assertEqual( nested_simplify(__UpperCamelCase ) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. 
We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
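# The behaviour exercised by the slow tests above, in plain pipeline form (model
# name comes straight from those tests; scores over the candidate labels sum to 1
# unless multi_label=True):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
print(result["labels"][0], result["scores"][0])  # highest-scoring label comes first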
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
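# Because the module runs doctest.testmod(), examples placed in the docstring become
# executable checks; two classic cases, written here as plain asserts:
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("00000", "11111") == 5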
8
0
'''simple docstring'''

from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters \'macro\' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {\'f1\': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results[\'f1\'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results[\'f1\'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results[\'f1\'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results[\'f1\'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results[\'f1\'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {\'f1\': array([0.8, 0. , 0. ])}
'''

_CITATION = '''
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O.
         and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A.
         and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
107
'''simple docstring'''

import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
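For orientation, a minimal sketch of calling the converter directly from Python; the repo id comes from the argparse help above, while the output directory is a hypothetical placeholder:

# hypothetical invocation (output path is a placeholder, not from the source)
convert_roberta_prelayernorm_checkpoint_to_pytorch(
    'andreasmadsen/efficient_mlm_m0.40',  # repo id taken from the argparse help
    './roberta_prelayernorm',             # illustrative output directory
)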
8
0
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return a peak element of `lst` by bisection (assumes a single peak)."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
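Because each recursive call keeps only about half of the list, this needs O(log n) comparisons on a unimodal list. A minimal sanity check (values are illustrative):

# the list rises to 9 and then falls, so 9 is the peak
print(peak([1, 3, 5, 9, 7, 2]))  # expected: 9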
108
'''simple docstring'''

import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
8
0
'''simple docstring'''

import re


def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
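To make the intended behavior concrete, a small usage sketch (inputs and outputs are illustrative, not from the source):

print(to_pascal_case("hello world"))                 # "HelloWorld"
print(to_camel_case("hello world"))                  # "helloWorld"
print(to_snake_case("this is a string", upper=False))  # "this_is_a_string"
print(to_kebab_case("this is a string", upper=False))  # "this-is-a-string"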
109
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''lxmert''' lowerCAmelCase = {} def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ): '''simple docstring''' __A : Tuple = vocab_size __A : int = hidden_size __A : str = num_attention_heads __A : Tuple = hidden_act __A : int = intermediate_size __A : str = hidden_dropout_prob __A : Optional[int] = attention_probs_dropout_prob __A : Optional[Any] = max_position_embeddings __A : Tuple = type_vocab_size __A : Optional[int] = initializer_range __A : Any = layer_norm_eps __A : Optional[Any] = num_qa_labels __A : Optional[int] = num_object_labels __A : Any = num_attr_labels __A : Union[str, Any] = l_layers __A : Optional[int] = x_layers __A : List[Any] = r_layers __A : Tuple = visual_feat_dim __A : Tuple = visual_pos_dim __A : Optional[int] = visual_loss_normalizer __A : int = task_matched __A : List[Any] = task_mask_lm __A : Optional[Any] = task_obj_predict __A : str = task_qa __A : List[Any] = visual_obj_loss __A : Optional[Any] = visual_attr_loss __A : Union[str, Any] = visual_feat_loss __A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers} super().__init__(**_UpperCAmelCase)
8
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class a ( lowercase ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self , UpperCamelCase_ = 1 , UpperCamelCase_ = 100 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , ): if audio_length_in_s is None: UpperCAmelCase__ : Any = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Any = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : str = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) UpperCAmelCase__ : str = int(UpperCamelCase_ ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : Union[str, Any] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' ' process.' ) UpperCAmelCase__ : Optional[int] = int(UpperCamelCase_ ) UpperCAmelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) UpperCAmelCase__ : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : Dict = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample UpperCAmelCase__ : int = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Tuple = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=UpperCamelCase_ )
110
'''simple docstring'''

import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the fewest perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
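A quick worked example: 12 = 4 + 4 + 4, so three squares suffice and no fewer do, consistent with Lagrange's four-square theorem guaranteeing that at most four are ever needed. The calls below are illustrative:

print(minimum_squares_to_represent_a_number(12))  # expected: 3
print(minimum_squares_to_represent_a_number(25))  # expected: 1, since 25 = 5**2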
8
0
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
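The three pointers partition the list in a single pass, so the sort runs in O(n) time with O(1) extra space. A small illustrative check:

print(dutch_national_flag_sort([2, 0, 1, 0, 2, 1]))  # expected: [0, 0, 1, 1, 2, 2]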
87
'''simple docstring'''

from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
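Greedy selection by value-to-weight ratio is optimal here precisely because items may be taken fractionally. As an illustrative example (numbers chosen for this sketch, not from the source): with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the ratios are 6, 5 and 4, so the first two items are taken whole and 2/3 of the third, for a total of 240:

max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_value)   # expected: 240.0
print(fractions)   # expected: [1, 1, 0.666...]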
8
0
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase_ ( a__ , a__ ): @register_to_config def __init__( self , *, __A = 4 , __A = 768 , __A , __A , ) -> List[Any]: super().__init__() SCREAMING_SNAKE_CASE_ : str =nn.Parameter(torch.zeros(_UpperCAmelCase ) ) # parameters for additional clip time embeddings SCREAMING_SNAKE_CASE_ : List[str] =nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] =nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) # parameters for encoder hidden states SCREAMING_SNAKE_CASE_ : Dict =clip_extra_context_tokens SCREAMING_SNAKE_CASE_ : List[Any] =nn.Linear( _UpperCAmelCase , self.clip_extra_context_tokens * cross_attention_dim ) SCREAMING_SNAKE_CASE_ : Dict =nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[Any] =nn.LayerNorm(_UpperCAmelCase ) def _snake_case ( self , *, __A , __A , __A , __A ) -> Tuple: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings SCREAMING_SNAKE_CASE_ : Optional[int] =image_embeddings.shape[0] SCREAMING_SNAKE_CASE_ : List[Any] =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) SCREAMING_SNAKE_CASE_ : Dict =classifier_free_guidance_embeddings.expand( _UpperCAmelCase , -1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] SCREAMING_SNAKE_CASE_ : List[str] =prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... SCREAMING_SNAKE_CASE_ : Any =self.embedding_proj(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] =self.clip_image_embeddings_project_to_time_embeddings(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Tuple =time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" SCREAMING_SNAKE_CASE_ : Tuple =self.clip_extra_context_tokens_proj(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : int =clip_extra_context_tokens.reshape(_UpperCAmelCase , -1 , self.clip_extra_context_tokens ) SCREAMING_SNAKE_CASE_ : Optional[Any] =clip_extra_context_tokens.permute(0 , 2 , 1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.encoder_hidden_states_proj(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] =self.text_encoder_hidden_states_norm(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Any =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
443
'''simple docstring''' from __future__ import annotations import math class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : int = size # approximate the overall size of segment tree with given value __A : Optional[Any] = [0 for i in range(0 , 4 * size)] # create array to store lazy update __A : Optional[Any] = [0 for i in range(0 , 4 * size)] __A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return idx * 2 def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return idx * 2 + 1 def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if left_element == right_element: __A : List[Any] = a[left_element - 1] else: __A : List[str] = (left_element + right_element) // 2 self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase) __A : Any = max( self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)]) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if self.flag[idx] is True: __A : Optional[Any] = self.lazy[idx] __A : Optional[Any] = False if left_element != right_element: __A : List[Any] = self.lazy[idx] __A : Dict = self.lazy[idx] __A : Tuple = True __A : Union[str, Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __A : Optional[int] = val if left_element != right_element: __A : Tuple = val __A : Any = val __A : Tuple = True __A : Union[str, Any] = True return True __A : str = (left_element + right_element) // 2 self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : int = max( self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)]) return True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if self.flag[idx] is True: __A : Union[str, Any] = self.lazy[idx] __A : List[str] = False if left_element != right_element: __A : Union[str, Any] = self.lazy[idx] __A : Optional[int] = self.lazy[idx] __A : str = True __A : Union[str, Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __A : Any = (left_element + right_element) // 2 __A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) __A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return max(_UpperCAmelCase , _UpperCAmelCase) def __str__( self): '''simple docstring''' return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)]) if __name__ == "__main__": lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] lowercase__ : str = 15 lowercase__ : List[Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) 
print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 1_11) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 2_35) print(segt)
8
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
625
'''simple docstring'''


def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
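As a sanity check of the formula S = n/2 * (2a + (n-1)d): with a=1, d=1 and n=10 this gives 10/2 * (2 + 9) = 55, the familiar sum 1+2+...+10. The second call below is a further illustrative example:

print(sum_of_series(1, 1, 10))  # expected: 55.0
print(sum_of_series(5, 3, 4))   # 5 + 8 + 11 + 14 -> expected: 38.0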
8
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :int ,_UpperCamelCase :List[Any] ,_UpperCamelCase :str=7 ,_UpperCamelCase :str=3 ,_UpperCamelCase :int=3_0 ,_UpperCamelCase :str=4_0_0 ,_UpperCamelCase :str=True ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Tuple=[0.5, 0.5, 0.5] ,_UpperCamelCase :Optional[Any]=[0.5, 0.5, 0.5] ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=1 / 2_5_5 ,_UpperCamelCase :Optional[Any]=True ,): snake_case_ : int = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} snake_case_ : List[Any] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Tuple = num_channels snake_case_ : str = min_resolution snake_case_ : List[Any] = max_resolution snake_case_ : int = do_resize snake_case_ : Any = size snake_case_ : int = do_normalize snake_case_ : Union[str, Any] = image_mean snake_case_ : List[str] = image_std snake_case_ : Dict = do_rescale snake_case_ : List[str] = rescale_factor snake_case_ : Union[str, Any] = do_pad def a__ ( self :Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def a__ ( self :Optional[Any] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[int]=False ): if not batched: snake_case_ : str = image_inputs[0] if isinstance(_UpperCAmelCase ,Image.Image ): snake_case_ : Tuple = image.size else: snake_case_ : int = image.shape[1], image.shape[2] if w < h: snake_case_ : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) snake_case_ : int = self.size['shortest_edge'] elif w > h: snake_case_ : List[Any] = self.size['shortest_edge'] snake_case_ : Dict = int(self.size["""shortest_edge"""] * w / h ) else: snake_case_ : Tuple = self.size['shortest_edge'] snake_case_ : List[Any] = self.size['shortest_edge'] else: snake_case_ : List[Any] = [] for image in image_inputs: snake_case_ : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : Union[str, Any] = max(_UpperCAmelCase ,key=lambda _UpperCamelCase : item[0] )[0] snake_case_ : Optional[int] = max(_UpperCAmelCase ,key=lambda _UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __UpperCamelCase ( a__ , unittest.TestCase ): lowercase : str = YolosImageProcessor if is_vision_available() else None def a__ ( self :Any ): snake_case_ : List[Any] = YolosImageProcessingTester(self ) @property def a__ ( self :Dict ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Optional[Any] ): snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_UpperCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_UpperCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_UpperCAmelCase ,"""do_resize""" ) ) 
self.assertTrue(hasattr(_UpperCAmelCase ,"""size""" ) ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} ) self.assertEqual(image_processor.do_pad ,_UpperCAmelCase ) snake_case_ : Any = self.image_processing_class.from_dict( self.image_processor_dict ,size=4_2 ,max_size=8_4 ,pad_and_return_pixel_mask=_UpperCAmelCase ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 4_2, """longest_edge""": 8_4} ) self.assertEqual(image_processor.do_pad ,_UpperCAmelCase ) def a__ ( self :int ): pass def a__ ( self :Optional[int] ): snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase ,Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case_ : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase ,batched=_UpperCAmelCase ) snake_case_ : List[Any] = image_processing(_UpperCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def a__ ( self :Union[str, Any] ): snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase ,np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case_ : Optional[Any] = image_processing(_UpperCAmelCase ,return_tensors="""pt""" ).pixel_values snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ,batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def a__ ( self :List[str] ): snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase ,torch.Tensor ) # Test not batched input snake_case_ : Any = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values snake_case_ : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case_ : Any = 
image_processing(_UpperCAmelCase ,return_tensors="""pt""" ).pixel_values snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ,batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def a__ ( self :str ): snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) snake_case_ : Dict = self.image_processing_class(do_resize=_UpperCAmelCase ,do_normalize=_UpperCAmelCase ,do_rescale=_UpperCAmelCase ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase ,torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors snake_case_ : Any = image_processing_a.pad(_UpperCAmelCase ,return_tensors="""pt""" ) snake_case_ : Optional[int] = image_processing_a(_UpperCAmelCase ,return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""] ,encoded_images["""pixel_values"""] ,atol=1E-4 ) ) @slow def a__ ( self :int ): snake_case_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f: snake_case_ : int = json.loads(f.read() ) snake_case_ : Any = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them snake_case_ : int = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) snake_case_ : str = image_processing(images=_UpperCAmelCase ,annotations=_UpperCAmelCase ,return_tensors="""pt""" ) # verify pixel values snake_case_ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["""pixel_values"""].shape ,_UpperCAmelCase ) snake_case_ : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,_UpperCAmelCase ,atol=1E-4 ) ) # verify area snake_case_ : Union[str, Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,_UpperCAmelCase ) ) # verify boxes snake_case_ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,_UpperCAmelCase ) snake_case_ : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,_UpperCAmelCase ,atol=1E-3 ) ) # verify image_id snake_case_ : Union[str, Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,_UpperCAmelCase ) ) # verify is_crowd snake_case_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,_UpperCAmelCase ) ) # verify class_labels snake_case_ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,_UpperCAmelCase ) ) # verify orig_size snake_case_ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,_UpperCAmelCase ) ) # verify size snake_case_ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,_UpperCAmelCase ) ) @slow def a__ ( self 
:Optional[Any] ): snake_case_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f: snake_case_ : Any = json.loads(f.read() ) snake_case_ : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} snake_case_ : Dict = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them snake_case_ : List[Any] = YolosImageProcessor(format="""coco_panoptic""" ) snake_case_ : Optional[int] = image_processing(images=_UpperCAmelCase ,annotations=_UpperCAmelCase ,masks_path=_UpperCAmelCase ,return_tensors="""pt""" ) # verify pixel values snake_case_ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["""pixel_values"""].shape ,_UpperCAmelCase ) snake_case_ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,_UpperCAmelCase ,atol=1E-4 ) ) # verify area snake_case_ : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,_UpperCAmelCase ) ) # verify boxes snake_case_ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,_UpperCAmelCase ) snake_case_ : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,_UpperCAmelCase ,atol=1E-3 ) ) # verify image_id snake_case_ : Union[str, Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,_UpperCAmelCase ) ) # verify is_crowd snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,_UpperCAmelCase ) ) # verify class_labels snake_case_ : Optional[int] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,_UpperCAmelCase ) ) # verify masks snake_case_ : Tuple = 8_2_2_8_7_3 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,_UpperCAmelCase ) # verify orig_size snake_case_ : str = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,_UpperCAmelCase ) ) # verify size snake_case_ : int = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,_UpperCAmelCase ) )
334
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): '''simple docstring''' __A : Optional[int] = parent __A : str = 13 __A : List[Any] = 7 __A : List[str] = True __A : str = True __A : Optional[Any] = True __A : int = True __A : Dict = 99 __A : Dict = 384 __A : Any = 2 __A : int = 4 __A : Optional[Any] = 37 __A : Optional[int] = 'gelu' __A : Dict = 0.1 __A : Optional[int] = 0.1 __A : Any = 512 __A : int = 16 __A : List[str] = 2 __A : str = 0.02 __A : Any = 3 __A : str = 4 __A : Union[str, Any] = 128 __A : int = 2 __A : List[Any] = 9 __A : List[Any] = 1 __A : List[Any] = None def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __A : str = None if self.use_input_mask: __A : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) __A : Optional[Any] = None if self.use_token_type_ids: __A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __A : Optional[int] = None __A : List[str] = None __A : Dict = None if self.use_labels: __A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) __A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __A : str = ids_tensor([self.batch_size] , self.num_choices) __A : List[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : int = TFConvBertModel(config=_UpperCAmelCase) __A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __A : Tuple = [input_ids, input_mask] __A : Any = model(_UpperCAmelCase) __A : Dict = model(_UpperCAmelCase) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase) __A : str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : str = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[int] = self.num_labels __A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase) __A : Optional[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Tuple = self.num_choices __A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase) __A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1)) __A : int = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __A : Optional[Any] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = self.num_labels __A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase) __A : str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : int = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase) __A : Any = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __A : Union[str, Any] = model(_UpperCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.prepare_config_and_inputs() ( ( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) ,( __A ) , ) : Union[str, Any] = config_and_inputs __A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = ( ( TFConvBertModel, TFConvBertForMaskedLM, 
TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = TFConvBertModelTester(self) __A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __A : List[str] = True __A : List[str] = True if hasattr(_UpperCAmelCase , 'use_cache'): __A : List[Any] = True __A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length) __A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) for model_class in self.all_model_classes: __A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) __A : Optional[int] = model_class(_UpperCAmelCase) __A : Optional[Any] = len(model(_UpperCAmelCase)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase) __A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1') __A : Tuple = tf.keras.models.load_model(_UpperCAmelCase) __A : str = model(_UpperCAmelCase) if self.is_encoder_decoder: __A : Optional[int] = outputs['encoder_hidden_states'] __A : str = outputs['encoder_attentions'] else: __A : List[Any] = outputs['hidden_states'] __A : Optional[Any] = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) __A : str = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) self.assertListEqual( list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) 
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base') self.assertIsNotNone(_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Any = True __A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length) __A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length) __A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) __A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase) def check_decoder_attentions_output(_UpperCAmelCase): __A : List[str] = len(_UpperCAmelCase) self.assertEqual(out_len % 2 , 0) __A : Any = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase): __A : str = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __A : Dict = True __A : Any = False __A : str = model_class(_UpperCAmelCase) __A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : List[str] = len(_UpperCAmelCase) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) if self.is_encoder_decoder: __A : Union[str, Any] = model_class(_UpperCAmelCase) __A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_decoder_attentions_output(_UpperCAmelCase) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __A : int = True __A : Tuple = model_class(_UpperCAmelCase) __A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) # Check attention is always last and order is fine __A : Any = True __A : str = True __A : Union[str, Any] = model_class(_UpperCAmelCase) __A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase)) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase) check_encoder_attentions_output(_UpperCAmelCase) @require_tf class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base') __A : str = tf.constant([[0, 1, 2, 3, 4, 5]]) __A : Optional[int] = model(_UpperCAmelCase)[0] __A : List[Any] = [1, 6, 768] self.assertEqual(output.shape , _UpperCAmelCase) __A : Tuple = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, 
-0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ]) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4)
8
0
'''simple docstring''' from __future__ import annotations import queue class UpperCamelCase__ : def __init__( self : str , lowerCamelCase : List[str] ): '''simple docstring''' a__ = data a__ = None a__ = None def _lowerCamelCase () -> TreeNode: print("\n********Press N to stop entering at any point of time********\n" ) a__ = input("Enter the value of the root node: " ).strip().lower() a__ = queue.Queue() a__ = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): a__ = q.get() a__ = f'''Enter the left node of {node_found.data}: ''' a__ = input(__snake_case ).strip().lower() or 'n' if check == "n": return tree_node a__ = TreeNode(int(__snake_case ) ) a__ = left_node q.put(__snake_case ) a__ = f'''Enter the right node of {node_found.data}: ''' a__ = input(__snake_case ).strip().lower() or 'n' if check == "n": return tree_node a__ = TreeNode(int(__snake_case ) ) a__ = right_node q.put(__snake_case ) raise def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return a__ = queue.Queue() q.put(__snake_case ) while not q.empty(): a__ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return a__ = queue.Queue() q.put(__snake_case ) while not q.empty(): a__ = [] while not q.empty(): a__ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return a__ = [] a__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(__snake_case ) a__ = n.left # end of while means current node doesn't have left child a__ = stack.pop() # start to traverse its right child a__ = n.right def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return a__ = [] a__ = node while n or stack: while n: stack.append(__snake_case ) a__ = n.left a__ = stack.pop() print(n.data , end="," ) a__ = n.right def _lowerCamelCase (__lowerCamelCase : TreeNode ) -> None: if not isinstance(__snake_case , __snake_case ) or not node: return a__ = [], [] a__ = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 a__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def _lowerCamelCase (__lowerCamelCase : str = "" , __lowerCamelCase : Dict=50 , 
__lowerCamelCase : Optional[int]="*" ) -> str: if not s: return "\n" + width * char a__ = divmod(width - len(__snake_case ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) lowerCAmelCase_ : TreeNode = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 50 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
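The traversal file above is heavily mangled (placeholder identifiers such as `__snake_case` stand in for real names). As a readable reference, here is a self-contained sketch of the iterative in-order traversal it implements; `Node` is my own stand-in for the sample's `TreeNode`:

```python
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def in_order_iter(node: Node | None) -> list[int]:
    """Left subtree, node, right subtree, with an explicit stack."""
    result: list[int] = []
    stack: list[Node] = []
    while node or stack:
        while node:                # walk as far left as possible
            stack.append(node)
            node = node.left
        node = stack.pop()         # visit the leftmost unvisited node
        result.append(node.data)
        node = node.right          # then traverse its right subtree
    return result


root = Node(1)
root.left, root.right = Node(2), Node(3)
assert in_order_iter(root) == [2, 1, 3]
```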
489
'''simple docstring''' import argparse import os import re lowercase__ : Optional[int] = '''src/diffusers''' # Pattern that looks at the indentation in a line. lowercase__ : Dict = re.compile(r'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowercase__ : str = re.compile(r'''\[([^\]]+)\]''') def _lowerCAmelCase ( __snake_case : str ) -> Tuple: __A : List[Any] = _re_indent.search(__snake_case ) return "" if search is None else search.groups()[0] def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]: __A : Tuple = 0 __A : Optional[int] = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(__snake_case ): index += 1 __A : Optional[int] = ['\n'.join(lines[:index] )] else: __A : Any = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __A : Tuple = [lines[index]] index += 1 while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(__snake_case ) ) if index < len(__snake_case ) - 1: __A : Union[str, Any] = [lines[index + 1]] index += 1 else: __A : Union[str, Any] = [] else: blocks.append('\n'.join(__snake_case ) ) __A : Optional[Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__snake_case ) > 0: blocks.append('\n'.join(__snake_case ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__snake_case ): blocks.append('\n'.join(lines[index:] ) ) return blocks def _lowerCAmelCase ( __snake_case : List[Any] ) -> int: def _inner(__snake_case : List[Any] ): return key(__snake_case ).lower().replace('_' , '' ) return _inner def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]: # If no key is provided, we use a noop. def noop(__snake_case : List[Any] ): return x if key is None: __A : Optional[Any] = noop # Constants are all uppercase, they go first. __A : str = [obj for obj in objects if key(__snake_case ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()] # Functions begin with a lowercase, they go last. __A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()] __A : Tuple = ignore_underscore(__snake_case ) return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple: # This inner function sort imports between [ ]. 
def _replace(__snake_case : Tuple ): __A : List[str] = match.groups()[0] if "," not in imports: return f'[{imports}]' __A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __A : Dict = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]" __A : List[Any] = import_statement.split('\n' ) if len(__snake_case ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __A : Optional[int] = 2 if lines[1].strip() == '[' else 1 __A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] ) __A : Any = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__snake_case ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] ) else: __A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __A : Tuple = keys[:-1] __A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] ) return "\n".join(__snake_case ) else: # Finally we have to deal with imports fitting on one line __A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case ) return import_statement def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]: with open(__snake_case , 'r' ) as f: __A : Dict = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __A : str = split_code_in_indented_blocks( __snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__snake_case ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __A : Tuple = main_blocks[block_idx] __A : int = block.split('\n' ) # Get to the start of the imports. __A : Tuple = 0 while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __A : Optional[int] = len(__snake_case ) else: line_idx += 1 if line_idx >= len(__snake_case ): continue # Ignore beginning and last line: they don't contain anything. __A : Dict = '\n'.join(block_lines[line_idx:-1] ) __A : int = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case ) # We have two categories of import key: list or _import_structure[key].append/extend __A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. 
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None] __A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __A : str = 0 __A : Any = [] for i in range(len(__snake_case ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(__snake_case ) count += 1 # And we put our main block back together with its first and last line. __A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(__snake_case ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(__snake_case , 'w' ) as f: f.write('\n'.join(__snake_case ) ) def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]: __A : Tuple = [] for root, _, files in os.walk(__snake_case ): if "__init__.py" in files: __A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case ) if result: __A : Dict = [os.path.join(__snake_case , '__init__.py' )] if len(__snake_case ) > 0: raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' ) if __name__ == "__main__": lowercase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowercase__ : Union[str, Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
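The import sorter above works by splitting source code into blocks keyed on indentation before reordering them. A reduced sketch of that splitting step; the helper names and the simplified block rule are mine, not the file's:

```python
import re

_re_indent = re.compile(r"^(\s*)\S")


def get_indent(line: str) -> str:
    match = _re_indent.search(line)
    return "" if match is None else match.groups()[0]


def split_by_indent(code: str, level: str = "") -> list[str]:
    """Group lines into blocks that begin at the given indentation level."""
    blocks: list[list[str]] = []
    for line in code.splitlines():
        if line.strip() and get_indent(line) == level:
            blocks.append([line])      # a line at the target level opens a block
        elif blocks:
            blocks[-1].append(line)    # deeper or blank lines extend the open block
    return ["\n".join(block) for block in blocks]


sample = "def f():\n    return 1\ndef g():\n    return 2"
assert split_by_indent(sample) == ["def f():\n    return 1", "def g():\n    return 2"]
```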
8
0
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED lowerCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''allenai/led-base-16384''': 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __lowercase ( ) -> Optional[Any]: '''simple docstring''' _A = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(__snake_case ) cs.append(2**8 + n ) n += 1 _A = [chr(__snake_case ) for n in cs] return dict(zip(__snake_case , __snake_case ) ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class _UpperCAmelCase ( a__ ): """simple docstring""" snake_case = VOCAB_FILES_NAMES snake_case = PRETRAINED_VOCAB_FILES_MAP snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]="replace" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : Optional[Any]="</s>" , __UpperCAmelCase : Dict="</s>" , __UpperCAmelCase : str="<s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : int=False , **__UpperCAmelCase : Union[str, Any] , ): '''simple docstring''' _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle: _A = json.load(_UpperCAmelCase ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle: _A = merges_handle.read().split("\n" )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] _A = tuple(_UpperCAmelCase ) _A = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: _A = min(_UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break _A = bigram _A = [] _A = 0 while i < len(_UpperCAmelCase ): try: _A = word.index(_UpperCAmelCase , _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(_UpperCAmelCase ) _A = new_word if len(_UpperCAmelCase ) == 1: break else: _A = get_pairs(_UpperCAmelCase ) _A = ' '.join(_UpperCAmelCase ) _A = word return word def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Any ): '''simple docstring''' _A = [] for token in re.findall(self.pat , _UpperCAmelCase ): _A = ''.join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict ): '''simple docstring''' return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : str ): '''simple docstring''' return self.decoder.get(_UpperCAmelCase ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Any ): '''simple docstring''' _A = ''.join(_UpperCAmelCase ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] = None ): '''simple docstring''' if not 
os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _A = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" ) _A = 0 with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) _A = token_index writer.write(" ".join(_UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : int = None , __UpperCAmelCase : Union[str, Any] = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] = None ): '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any]=False , **__UpperCAmelCase : Dict ): '''simple docstring''' _A = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()): _A = ' ' + text return (text, kwargs) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] = None , __UpperCAmelCase : List[Any] = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Union[str, Any] = None , ): '''simple docstring''' _A = super()._pad( encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ) # Load from model defaults if return_attention_mask is None: _A = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _A = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
_A = len(encoded_inputs["global_attention_mask"] ) != len(_UpperCAmelCase ) if needs_to_be_padded: _A = len(_UpperCAmelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _A = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": _A = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
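The tokenizer above is a classic byte-pair encoder: collect adjacent symbol pairs, then repeatedly merge the highest-priority pair from a learned merge table. A compact, self-contained sketch of that loop with an invented two-entry merge table:

```python
from __future__ import annotations


def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    return set(zip(word, word[1:]))


def bpe(token: str, ranks: dict[tuple[str, str], int]) -> str:
    word = tuple(token)
    while True:
        pairs = get_pairs(word)
        if not pairs:
            break
        # Merge the pair with the lowest rank, i.e. the merge learned earliest.
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged: list[str] = []
        i = 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)


# Toy merge table: learn "l"+"o" first, then "lo"+"w".
assert bpe("low", {("l", "o"): 0, ("lo", "w"): 1}) == "low"
```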
330
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
8
0
def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : List[str] = 0 lowerCamelCase : Optional[int] = len(__snake_case ) for i in range(n - 1 ): for j in range(i + 1 ,__snake_case ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: if len(__snake_case ) <= 1: return arr, 0 lowerCamelCase : List[Any] = len(__snake_case ) // 2 lowerCamelCase : int = arr[0:mid] lowerCamelCase : List[Any] = arr[mid:] lowerCamelCase : Optional[Any] = count_inversions_recursive(__snake_case ) lowerCamelCase : Any = count_inversions_recursive(__snake_case ) lowerCamelCase : str = _count_cross_inversions(__snake_case ,__snake_case ) lowerCamelCase : int = inversion_p + inversions_q + cross_inversions return c, num_inversions def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: lowerCamelCase : List[str] = [] lowerCamelCase : Optional[int] = 0 while i < len(__snake_case ) and j < len(__snake_case ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__snake_case ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__snake_case ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def A ( ) -> Any: lowerCamelCase : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) lowerCamelCase : Tuple = count_inversions_bf(__snake_case ) lowerCamelCase : Optional[Any] = count_inversions_recursive(__snake_case ) assert num_inversions_bf == num_inversions_recursive == 8 print("number of inversions = " ,__snake_case ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() lowerCamelCase : str = count_inversions_bf(__snake_case ) lowerCamelCase : Any = count_inversions_recursive(__snake_case ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " ,__snake_case ) # an empty list should also have zero inversions lowerCamelCase : Union[str, Any] = [] lowerCamelCase : List[Any] = count_inversions_bf(__snake_case ) lowerCamelCase : List[Any] = count_inversions_recursive(__snake_case ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " ,__snake_case ) if __name__ == "__main__": main()
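The divide-and-conquer inversion counter above has been corrupted by the identifier mangling: `count_inversions_recursive` returns a pair, but the collapsed assignments drop one element of it. A corrected minimal sketch of the O(n log n) idea, checked against the sample's own test array:

```python
def count_inversions(arr: list) -> tuple:
    """Return (sorted copy, number of inversions) via merge sort."""
    if len(arr) <= 1:
        return list(arr), 0
    mid = len(arr) // 2
    left, inv_left = count_inversions(arr[:mid])
    right, inv_right = count_inversions(arr[mid:])
    merged = []
    cross = i = j = 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            # left[i:] are all greater than right[j]: each one is an inversion.
            cross += len(left) - i
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged.extend(left[i:] or right[j:])
    return merged, inv_left + inv_right + cross


assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8
```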
311
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : str = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __A : Optional[Any] = k.replace(__snake_case , __snake_case ) if k.startswith('encoder' ): __A : Any = k.replace('.attn' , '.self_attn' ) __A : Any = k.replace('norm1' , 'self_attn_layer_norm' ) __A : str = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): __A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' ) __A : str = k.replace('norm2' , 'encoder_attn_layer_norm' ) __A : int = k.replace('norm3' , 'final_layer_norm' ) return k def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict: __A : Optional[int] = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: __A : Tuple = sd.pop(__snake_case ) __A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd __A : str = v lowercase__ : Tuple = ['''START'''] @torch.no_grad() def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int: __A : List[str] = torch.load(__snake_case , map_location='cpu' ) __A : Tuple = model['model'] __A : str = BlenderbotConfig.from_json_file(__snake_case ) __A : int = BlenderbotForConditionalGeneration(__snake_case ) __A : List[Any] = m.model.state_dict().keys() __A : Optional[int] = [] __A : Optional[int] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __A : Union[str, Any] = rename_state_dict_key(__snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __A : Optional[Any] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__snake_case ) m.model.load_state_dict(__snake_case , strict=__snake_case ) m.half() m.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) lowercase__ : Optional[Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
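The conversion script above is a table-driven rename of checkpoint keys. A reduced sketch of that pattern (the patterns shown are a small subset, purely for illustration):

```python
PATTERNS = [("q_lin", "q_proj"), ("k_lin", "k_proj"), ("norm_embeddings", "layernorm_embedding")]


def rename_key(key: str) -> str:
    for parlai_name, hf_name in PATTERNS:
        key = key.replace(parlai_name, hf_name)
    return key


def rename_state_dict(sd: dict) -> dict:
    # Move every value under its renamed key, leaving the values untouched.
    return {rename_key(k): v for k, v in sd.items()}


sd = {"encoder.q_lin.weight": 1, "norm_embeddings.bias": 2}
assert rename_state_dict(sd) == {"encoder.q_proj.weight": 1, "layernorm_embedding.bias": 2}
```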
8
0
import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( a__ ): _UpperCamelCase : Optional[int] = '''naver-clova-ix/donut-base-finetuned-docvqa''' _UpperCamelCase : Tuple = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) _UpperCamelCase : Any = '''document_qa''' _UpperCamelCase : Tuple = AutoProcessor _UpperCamelCase : List[Any] = VisionEncoderDecoderModel _UpperCamelCase : List[Any] = ['''image''', '''text'''] _UpperCamelCase : List[Any] = ['''text'''] def __init__( self : Any , *_A : List[str] , **_A : int ) -> Tuple: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) def __a ( self : str , _A : List[str] , _A : Dict ) -> int: """simple docstring""" lowercase : Optional[int] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' lowercase : List[Any] = task_prompt.replace('''{user_input}''' , _UpperCAmelCase ) lowercase : Tuple = self.pre_processor.tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors='''pt''' ).input_ids lowercase : Any = self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __a ( self : Optional[int] , _A : Union[str, Any] ) -> List[str]: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_UpperCAmelCase , ).sequences def __a ( self : int , _A : int ) -> Optional[Any]: """simple docstring""" lowercase : Optional[Any] = self.pre_processor.batch_decode(_UpperCAmelCase )[0] lowercase : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) lowercase : int = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) lowercase : Any = re.sub(r'''<.*?>''' , '''''' , _UpperCAmelCase , count=1 ).strip() # remove first task start token lowercase : List[str] = self.pre_processor.tokenajson(_UpperCAmelCase ) return sequence["answer"]
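The decode step of the tool above strips the eos/pad tokens and then removes the leading task-start token with a single `re.sub`; note that the sample's `tokenajson` call appears to be a typo for the Donut processor's `token2json`. A self-contained sketch of the string cleanup (the token strings are illustrative):

```python
import re


def clean_generated_sequence(sequence: str, eos: str = "</s>", pad: str = "<pad>") -> str:
    """Strip special tokens, then drop the first task-start token."""
    sequence = sequence.replace(eos, "").replace(pad, "")
    return re.sub(r"<.*?>", "", sequence, count=1).strip()


raw = "<s_answer> 42</s><pad>"
assert clean_generated_sequence(raw) == "42"
```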
217
'''simple docstring''' import mpmath # for roots of unity import numpy as np class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None): '''simple docstring''' __A : List[Any] = list(poly_a or [0])[:] __A : Optional[int] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() __A : Union[str, Any] = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() __A : Optional[int] = len(self.polyB) # Add 0 to make lengths equal a power of 2 __A : Optional[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform __A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product __A : Tuple = self.__multiply() def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(_UpperCAmelCase) <= 1: return dft[0] # __A : Dict = self.c_max_length // 2 while next_ncol > 0: __A : Optional[Any] = [[] for i in range(_UpperCAmelCase)] __A : Tuple = self.root**next_ncol # First half of next step __A : Optional[Any] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(_UpperCAmelCase): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step __A : List[str] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(_UpperCAmelCase): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update __A : Optional[int] = new_dft __A : Tuple = next_ncol // 2 return dft[0] def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : int = self.__dft('A') __A : Optional[Any] = self.__dft('B') __A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT __A : Dict = 2 while next_ncol <= self.c_max_length: __A : Optional[int] = [[] for i in range(_UpperCAmelCase)] __A : Any = self.root ** (next_ncol // 2) __A : Tuple = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update __A : int = new_inverse_c next_ncol *= 2 # Unpack __A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self): '''simple docstring''' __A : int = 'A = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) __A : Optional[Any] = 'B = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) __A : str = 'A*B = ' + ' + '.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
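The class above multiplies polynomials with a hand-rolled radix-2 FFT. The same result can be cross-checked in a few lines with `numpy.fft`; this is an independent reimplementation, not the sample's API:

```python
import numpy as np


def poly_multiply(a: list, b: list) -> list:
    """Multiply coefficient lists via FFT: a pointwise product in the
    frequency domain equals convolution in the coefficient domain."""
    n = len(a) + len(b) - 1
    size = 1 << (n - 1).bit_length()  # round up to a power of two
    fa = np.fft.rfft(a, size)
    fb = np.fft.rfft(b, size)
    product = np.fft.irfft(fa * fb, size)[:n]
    return [round(x, 8) for x in product]


# (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2
assert poly_multiply([1, 2], [3, 4]) == [3.0, 10.0, 8.0]
```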
8
0
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
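Because each prime partition maps, by unique factorization, to a distinct product of its primes, `len(partition(n))` counts the prime partitions of n. A quick usage check, assuming the cleaned-up file above (values verified by hand):

```python
# Runs alongside the file above: products of primes uniquely encode partitions.
assert partition(5) == {5, 6}    # 5 itself, and 2 + 3
assert len(partition(10)) == 5   # 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5
```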
212
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ): '''simple docstring''' __A : Union[str, Any] = parent __A : Tuple = batch_size __A : List[str] = image_size __A : Dict = patch_size __A : Optional[Any] = num_channels __A : Tuple = is_training __A : Dict = use_labels __A : List[Any] = hidden_size __A : Tuple = num_hidden_layers __A : int = num_attention_heads __A : Optional[int] = intermediate_size __A : Tuple = hidden_act __A : Any = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : List[Any] = type_sequence_label_size __A : List[Any] = initializer_range __A : Optional[int] = num_labels __A : List[Any] = scope __A : Any = n_targets __A : Union[str, Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __A : int = num_patches + 1 + self.num_detection_tokens def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) __A : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __A : List[Any] = [] for i in range(self.batch_size): __A : Optional[int] = {} __A : Union[str, Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase) __A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase) labels.append(_UpperCAmelCase) __A : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase): '''simple docstring''' __A : Any = YolosModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = YolosForObjectDetection(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : str = model(pixel_values=_UpperCAmelCase) __A : List[str] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) __A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.prepare_config_and_inputs() __A ,__A ,__A : Tuple = config_and_inputs __A : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' __A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __A : Any = [] for i in range(self.model_tester.batch_size): __A : Tuple = {} __A : Tuple = torch.ones( size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long) __A : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float) labels.append(_UpperCAmelCase) __A : str = labels return inputs_dict def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = YolosModelTester(self) __A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Tuple = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[Any] = model_class(_UpperCAmelCase) __A : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : int = 
[*signature.parameters.keys()] __A : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Optional[int] = True # in YOLOS, the seq_len is different __A : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __A : Dict = True __A : Dict = False __A : Union[str, Any] = True __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __A : List[Any] = True __A : List[str] = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __A : str = len(_UpperCAmelCase) # Check attention is always last and order is fine __A : Dict = True __A : Dict = True __A : Dict = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.hidden_states __A : List[str] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) # YOLOS has a different seq_length __A : Dict = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[str] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase) @slow def 
SCREAMING_SNAKE_CASE ( self): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase) self.assertIsNotNone(_UpperCAmelCase) def _lowerCAmelCase ( ) -> int: __A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase) __A : Any = self.default_image_processor __A : str = prepare_img() __A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase) # forward pass with torch.no_grad(): __A : str = model(inputs.pixel_values) # verify outputs __A : Tuple = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape , _UpperCAmelCase) __A : Dict = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , ) __A : int = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) # verify postprocessing __A : List[str] = image_processor.post_process_object_detection( _UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0] __A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase) __A : Union[str, Any] = [75, 75, 17, 63, 17] __A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase) self.assertEqual(len(results['scores']) , 5) self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4)) self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase) self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
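The tester above precomputes the expected sequence length as patches plus the [CLS] token plus the detection tokens. That bookkeeping as a standalone helper, exercised with the tester's own default sizes:

```python
def yolos_expected_seq_len(image_size: tuple, patch_size: int, num_detection_tokens: int) -> int:
    height, width = image_size
    num_patches = (height // patch_size) * (width // patch_size)
    return num_patches + 1 + num_detection_tokens  # patches + [CLS] + detection tokens


# Defaults from the tester: 30x30 images, patch size 2, 10 detection tokens.
assert yolos_expected_seq_len((30, 30), 2, 10) == 236
```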
8
0
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version (or an explicit version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the current PyTorch version to a reference version with an operation."""
    return compare_versions(torch_version, operation, version)
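With the file above cleaned up, the mechanism reduces to `packaging` version objects plus an operator table. A self-contained illustration; this `STR_OPERATION_TO_FUNC` is a stand-in for the one the file imports from `.constants`:

```python
import operator

from packaging.version import parse

# Stand-in for the STR_OPERATION_TO_FUNC mapping imported from .constants.
STR_OPERATION_TO_FUNC = {
    ">": operator.gt, ">=": operator.ge,
    "==": operator.eq, "!=": operator.ne,
    "<": operator.lt, "<=": operator.le,
}


def version_satisfies(installed: str, operation: str, required: str) -> bool:
    if operation not in STR_OPERATION_TO_FUNC:
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC)}")
    return STR_OPERATION_TO_FUNC[operation](parse(installed), parse(required))


assert version_satisfies("2.1.0", ">=", "2.0")
assert not version_satisfies("1.13.1", ">", "2.0")
```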
676
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowercase__ : Optional[int] = None lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : List[str] = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } lowercase__ : Dict = { '''camembert-base''': 5_12, } lowercase__ : str = '''▁''' class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] lowerCAmelCase = CamembertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ): '''simple docstring''' __A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) __A : List[str] = vocab_file __A : Optional[int] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A : Optional[Any] = [self.cls_token_id] __A : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' __A : Optional[int] = [self.sep_token_id] __A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(_UpperCAmelCase): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __A : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase): copyfile(self.vocab_file , _UpperCAmelCase) return (out_vocab_file,)
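The pair-encoding methods above follow the RoBERTa layout, `<s> A </s></s> B </s>`. A standalone sketch of that construction; the token ids 0 and 2 are illustrative defaults, not read from the model:

```python
from __future__ import annotations


def build_inputs_with_special_tokens(
    ids_a: list[int], ids_b: list[int] | None = None, cls_id: int = 0, sep_id: int = 2
) -> list[int]:
    # RoBERTa-style layout used above: <s> A </s> for one sequence,
    # <s> A </s></s> B </s> for a pair.
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]


assert build_inputs_with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert build_inputs_with_special_tokens([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]
```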
8
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__ ( a__): UpperCamelCase_ = ["""image_processor""", """tokenizer"""] UpperCamelCase_ = """CLIPImageProcessor""" UpperCamelCase_ = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : Dict , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _UpperCAmelCase , ) SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''feature_extractor''' ) SCREAMING_SNAKE_CASE : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__( self : List[Any] , UpperCamelCase__ : int=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : Tuple ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: SCREAMING_SNAKE_CASE : int = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if images is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: SCREAMING_SNAKE_CASE : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def __A ( self : Union[str, Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def __A ( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ): '''simple docstring''' return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def __A ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self : List[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Union[str, Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _UpperCAmelCase , ) return self.image_processor
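The processor above routes text to the tokenizer and images to the image processor, then merges the two outputs. Typical usage against the public checkpoint (requires `transformers`, `torch`, and network access; a sketch, not a pinned recipe):

```python
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # placeholder image

# Text is routed to the tokenizer, images to the image processor; the
# returned encoding carries input_ids, attention_mask and pixel_values.
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))
```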
248
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase__ : Any = '''hf-internal-testing/tiny-random-bert''' lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase))) with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Any = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) self.assertTrue(os.path.isfile(_UpperCAmelCase)) # File is cached at the same place the second time. __A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase) # Using a specific revision to test the full commit hash. __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223') self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): __A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase) with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): __A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa') with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : int = cached_file(_UpperCAmelCase , 'conf') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : Any = cached_file(_UpperCAmelCase , 'conf') with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf'))) __A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : List[str] = mock.Mock() __A : Dict = 500 __A : List[str] = {} __A : List[Any] = HTTPError __A : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head: __A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt')) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): get_file_from_repo('bert-base-case' , _UpperCAmelCase) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha') __A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase) # The name is the cached name which is not very easy to test, so instead we load the content. __A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read()) self.assertEqual(config['hidden_size'] , 768) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __A : Tuple = Path(_UpperCAmelCase) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase)) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_=None , lowercase_=None ) -> List[str]: """simple docstring""" return field(default_factory=lambda: default , metadata=__snake_case ) @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) UpperCAmelCase__ = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) UpperCAmelCase__ = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) UpperCAmelCase__ = field( default=a__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) UpperCAmelCase__ = field( default=a__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) UpperCAmelCase__ = field( default=a__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Benchmark training of model'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Verbose memory tracing'''} ) UpperCAmelCase__ = field( default=a__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) UpperCAmelCase__ = field( default=a__ , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Trace memory line by line'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Save result to a CSV file'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Save all print statements in a log file'''} ) UpperCAmelCase__ = field(default=a__ , metadata={'''help''': '''Whether to print environment information'''} ) UpperCAmelCase__ = field( default=a__ , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) UpperCAmelCase__ = field( default=F"inference_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) UpperCAmelCase__ = field( default=F"inference_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) UpperCAmelCase__ = field( default=F"train_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) UpperCAmelCase__ = field( default=F"train_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) UpperCAmelCase__ = field( default=F"env_info_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) UpperCAmelCase__ = field( default=F"log_{round(time() )}.csv" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) UpperCAmelCase__ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) UpperCAmelCase__ = field( default=a__ , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , _UpperCAmelCase , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self) , indent=2) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' if len(self.models) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''') return self.models @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''') return False else: return True
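A minimal usage sketch, assuming the concrete PyTorch wrapper that consumes these arguments (the whole utility is deprecated, as the warning above notes; the checkpoint name is illustrative):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],  # any Hub checkpoint
    batch_sizes=[8],
    sequence_lengths=[32, 128],
)
results = PyTorchBenchmark(args).run()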
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) model and its tokenizer for the given config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
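A hypothetical direct invocation of the function above (the config name and output directory are illustrative):

# Writes an untrained t5-small-shaped model plus its tokenizer to ./t5_random
save_randomly_initialized_version("t5-small", "t5_random")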
import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=0 ) -> Any: # Format the message. if name is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] =None else: SCREAMING_SNAKE_CASE_ : List[Any] ='.' * max(0 , spaces - 2 ) + '# {:' + str(5_0 - spaces ) + 's}' SCREAMING_SNAKE_CASE_ : Dict =fmt.format(__snake_case ) # Print and recurse (if needed). if isinstance(__snake_case , __snake_case ): if msg is not None: print(__snake_case ) for k in val.keys(): recursive_print(__snake_case , val[k] , spaces + 2 ) elif isinstance(__snake_case , torch.Tensor ): print(__snake_case , ''':''' , val.size() ) else: print(__snake_case , ''':''' , __snake_case ) def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ) -> Union[str, Any]: # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. SCREAMING_SNAKE_CASE_ : Optional[Any] =param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] SCREAMING_SNAKE_CASE_ : Optional[Any] =(num_heads, hidden_size, num_splits) + input_shape[1:] SCREAMING_SNAKE_CASE_ : List[str] =param.view(*__snake_case ) SCREAMING_SNAKE_CASE_ : Tuple =param.transpose(0 , 2 ) SCREAMING_SNAKE_CASE_ : Dict =param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] SCREAMING_SNAKE_CASE_ : Dict =(num_heads, num_splits, hidden_size) + input_shape[1:] SCREAMING_SNAKE_CASE_ : str =param.view(*__snake_case ) SCREAMING_SNAKE_CASE_ : Optional[Any] =param.transpose(0 , 1 ).contiguous() SCREAMING_SNAKE_CASE_ : Dict =param.view(*__snake_case ) return param def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ) -> Union[str, Any]: # The converted output model. SCREAMING_SNAKE_CASE_ : str ={} # old versions did not store training args SCREAMING_SNAKE_CASE_ : List[str] =input_state_dict.get('''args''' , __snake_case ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) SCREAMING_SNAKE_CASE_ : Tuple =ds_args.padded_vocab_size SCREAMING_SNAKE_CASE_ : str =ds_args.max_position_embeddings SCREAMING_SNAKE_CASE_ : str =ds_args.hidden_size SCREAMING_SNAKE_CASE_ : Any =ds_args.num_layers SCREAMING_SNAKE_CASE_ : List[Any] =ds_args.num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] =ds_args.ffn_hidden_size # pprint(config) # The number of heads. SCREAMING_SNAKE_CASE_ : List[Any] =config.n_head # The hidden_size per head. SCREAMING_SNAKE_CASE_ : List[str] =config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): SCREAMING_SNAKE_CASE_ : Optional[Any] =input_state_dict['checkpoint_version'] else: SCREAMING_SNAKE_CASE_ : List[str] =0.0 # The model. 
SCREAMING_SNAKE_CASE_ : Any =input_state_dict['model'] # The language model. SCREAMING_SNAKE_CASE_ : Tuple =model['language_model'] # The embeddings. SCREAMING_SNAKE_CASE_ : Any =lm['embedding'] # The word embeddings. SCREAMING_SNAKE_CASE_ : int =embeddings['word_embeddings']['weight'] # Truncate the embedding table to vocab_size rows. SCREAMING_SNAKE_CASE_ : Union[str, Any] =word_embeddings[: config.vocab_size, :] SCREAMING_SNAKE_CASE_ : Any =word_embeddings # The position embeddings. SCREAMING_SNAKE_CASE_ : List[Any] =embeddings['position_embeddings']['weight'] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] SCREAMING_SNAKE_CASE_ : Dict =pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. SCREAMING_SNAKE_CASE_ : Any =pos_embeddings # The transformer. SCREAMING_SNAKE_CASE_ : Any =lm['transformer'] if 'transformer' in lm.keys() else lm['encoder'] # The regex to extract layer names. SCREAMING_SNAKE_CASE_ : Optional[Any] =re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. SCREAMING_SNAKE_CASE_ : int ={ 'attention.dense': '.attn.c_proj.', 'self_attention.dense': '.attn.c_proj.', 'mlp.dense_h_to_4h': '.mlp.c_fc.', 'mlp.dense_4h_to_h': '.mlp.c_proj.', } # Extract the layers. for key, val in transformer.items(): # Match the name. SCREAMING_SNAKE_CASE_ : str =layer_re.match(__snake_case ) # Stop if that's not a layer if m is None: break # The index of the layer. SCREAMING_SNAKE_CASE_ : Any =int(m.group(1 ) ) # The name of the operation. SCREAMING_SNAKE_CASE_ : List[Any] =m.group(2 ) # Is it a weight or a bias? SCREAMING_SNAKE_CASE_ : str =m.group(3 ) # The name of the layer. SCREAMING_SNAKE_CASE_ : Dict =f'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): SCREAMING_SNAKE_CASE_ : str ='ln_1' if op_name.startswith('''input''' ) else 'ln_2' SCREAMING_SNAKE_CASE_ : Union[str, Any] =val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. SCREAMING_SNAKE_CASE_ : Dict =torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __snake_case , __snake_case ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =causal_mask # Insert a "dummy" tensor for masked_bias. SCREAMING_SNAKE_CASE_ : Any =torch.tensor(-1E4 , dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ : List[Any] =masked_bias SCREAMING_SNAKE_CASE_ : List[Any] =fix_query_key_value_ordering(__snake_case , __snake_case , 3 , __snake_case , __snake_case ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. SCREAMING_SNAKE_CASE_ : List[Any] =out_val.transpose(0 , 1 ).contiguous() # Store. SCREAMING_SNAKE_CASE_ : List[Any] =out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": SCREAMING_SNAKE_CASE_ : Any =fix_query_key_value_ordering(__snake_case , __snake_case , 3 , __snake_case , __snake_case ) # Store. No change of shape. SCREAMING_SNAKE_CASE_ : Optional[Any] =out_val # Transpose the weights. 
elif weight_or_bias == "weight": SCREAMING_SNAKE_CASE_ : Union[str, Any] =megatron_to_transformers[op_name] SCREAMING_SNAKE_CASE_ : Union[str, Any] =val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": SCREAMING_SNAKE_CASE_ : Optional[Any] =megatron_to_transformers[op_name] SCREAMING_SNAKE_CASE_ : Tuple =val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. SCREAMING_SNAKE_CASE_ : List[Any] =transformer['final_layernorm.weight'] SCREAMING_SNAKE_CASE_ : int =transformer['final_layernorm.bias'] # For LM head, transformers' wants the matrix to weight embeddings. SCREAMING_SNAKE_CASE_ : Union[str, Any] =word_embeddings # It should be done! return output_state_dict def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: # Create the argument parser. SCREAMING_SNAKE_CASE_ : List[Any] =argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=__snake_case , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=__snake_case , help='''An optional config json file describing the pre-trained model.''' , ) SCREAMING_SNAKE_CASE_ : Optional[Any] =parser.parse_args() # Extract the basename. SCREAMING_SNAKE_CASE_ : List[Any] =os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.load(__snake_case , map_location='''cpu''' ) else: SCREAMING_SNAKE_CASE_ : List[Any] =torch.load(args.path_to_checkpoint , map_location='''cpu''' ) SCREAMING_SNAKE_CASE_ : Dict =input_state_dict.get('''args''' , __snake_case ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: SCREAMING_SNAKE_CASE_ : int ='gelu_fast' elif ds_args.openai_gelu: SCREAMING_SNAKE_CASE_ : Dict ='gelu_new' else: SCREAMING_SNAKE_CASE_ : int ='gelu' else: # in the very early days this used to be "gelu_new" SCREAMING_SNAKE_CASE_ : Optional[Any] ='gelu_new' # Spell out all parameters in case the defaults change. SCREAMING_SNAKE_CASE_ : str =GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__snake_case , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=__snake_case , summary_activation=__snake_case , summary_proj_to_labels=__snake_case , summary_first_dropout=0.1 , scale_attn_weights=__snake_case , use_cache=__snake_case , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: SCREAMING_SNAKE_CASE_ : int =GPTaConfig.from_json_file(args.config_file ) SCREAMING_SNAKE_CASE_ : Optional[Any] =['GPT2LMHeadModel'] # Convert. print('''Converting''' ) SCREAMING_SNAKE_CASE_ : str =convert_megatron_checkpoint(__snake_case , __snake_case , __snake_case ) # Print the structure of converted state dict. 
if args.print_checkpoint_structure: recursive_print(__snake_case , __snake_case ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: SCREAMING_SNAKE_CASE_ : List[Any] =ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": SCREAMING_SNAKE_CASE_ : Tuple ='gpt2' elif tokenizer_type == "PretrainedFromHF": SCREAMING_SNAKE_CASE_ : Optional[int] =ds_args.tokenizer_name_or_path else: raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' ) else: SCREAMING_SNAKE_CASE_ : int ='gpt2' SCREAMING_SNAKE_CASE_ : Optional[int] =AutoTokenizer.from_pretrained(__snake_case ) SCREAMING_SNAKE_CASE_ : List[str] =type(__snake_case ).__name__ SCREAMING_SNAKE_CASE_ : int =tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(__snake_case ) # Save tokenizer based on args print(f'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(__snake_case ) # Store the state_dict to file. SCREAMING_SNAKE_CASE_ : Dict =os.path.join(__snake_case , '''pytorch_model.bin''' ) print(f'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(__snake_case , __snake_case ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
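To see the version >= 2.0 QKV reordering performed by fix_query_key_value_ordering in isolation, a self-contained sketch with toy dimensions:

import torch

num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
# Megatron layout: [num_heads * num_splits * hidden_size, cols]
param = torch.randn(num_heads * num_splits * hidden_size, cols)
reordered = (
    param.view(num_heads, num_splits, hidden_size, cols)
    .transpose(0, 1)  # -> [num_splits, num_heads, hidden_size, cols]
    .contiguous()
    .view(num_heads * num_splits * hidden_size, cols)
)
print(reordered.shape)  # torch.Size([24, 5]); same size, row blocks permuted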
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase__ : Any = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''tapas''' def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __A : Dict = vocab_size __A : Tuple = hidden_size __A : Any = num_hidden_layers __A : int = num_attention_heads __A : Tuple = hidden_act __A : Tuple = intermediate_size __A : List[Any] = hidden_dropout_prob __A : int = attention_probs_dropout_prob __A : List[str] = max_position_embeddings __A : Optional[int] = type_vocab_sizes __A : str = initializer_range __A : List[str] = layer_norm_eps # Fine-tuning task hyperparameters __A : List[str] = positive_label_weight __A : List[Any] = num_aggregation_labels __A : Optional[Any] = aggregation_loss_weight __A : Tuple = use_answer_as_supervision __A : List[str] = answer_loss_importance __A : Any = use_normalized_answer_loss __A : Any = huber_loss_delta __A : Union[str, Any] = temperature __A : Tuple = aggregation_temperature __A : Optional[Any] = use_gumbel_for_cells __A : List[str] = use_gumbel_for_aggregation __A : Tuple = average_approximation_function __A : List[str] = cell_selection_preference __A : Dict = answer_loss_cutoff __A : Union[str, Any] = max_num_rows __A : Optional[Any] = max_num_columns __A : int = average_logits_per_cell __A : Optional[Any] = select_one_column __A : int = allow_empty_column_selection __A : List[Any] = init_cell_selection_weights_to_zero __A : int = reset_position_index_per_cell __A : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters __A : Optional[Any] = aggregation_labels __A : List[str] = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCAmelCase): __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit squares
    and tiles of lengths 2, 3 and 4 (Project Euler 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


if __name__ == "__main__":
    print(f"""{solution() = }""")
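A quick sanity check against the Project Euler 117 statement, which gives 15 tilings for a row of length five:

assert solution(5) == 15
assert solution(4) == 8  # empty row + 3 + 2 + 1 single-tile placements + one pair of 2-tiles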
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize): '''simple docstring''' __A : Union[str, Any] = 'bilinear' __A : int = max_size __A : Optional[Any] = short_edge_length def __call__( self , _UpperCAmelCase): '''simple docstring''' __A : int = [] for img in imgs: __A ,__A : Dict = img.shape[:2] # later: provide list and randomly choose index for resize __A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img __A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase) if h < w: __A ,__A : Optional[Any] = size, scale * w else: __A ,__A : Optional[Any] = scale * h, size if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size: __A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase) __A : Tuple = newh * scale __A : Dict = neww * scale __A : Dict = int(neww + 0.5) __A : Optional[int] = int(newh + 0.5) if img.dtype == np.uinta: __A : int = Image.fromarray(_UpperCAmelCase) __A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) __A : Dict = np.asarray(_UpperCAmelCase) else: __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw __A : Dict = nn.functional.interpolate( _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0) img_augs.append(_UpperCAmelCase) return img_augs class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) __A : List[Any] = cfg.INPUT.FORMAT __A : Dict = cfg.SIZE_DIVISIBILITY __A : str = cfg.PAD_VALUE __A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST __A : int = cfg.MODEL.DEVICE __A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images])) __A : Dict = [im.shape[-2:] for im in images] __A : Optional[int] = [ nn.functional.pad( _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_UpperCAmelCase , _UpperCAmelCase) ] return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase) def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' with torch.no_grad(): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : int = [images] if single_image: assert len(_UpperCAmelCase) == 1 for i in range(len(_UpperCAmelCase)): if isinstance(images[i] , torch.Tensor): images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge __A : str = torch.tensor([im.shape[:2] for im in images]) __A : List[str] = self.aug(_UpperCAmelCase) # transpose images and convert to 
torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __A : Any = [self.normalizer(_UpperCAmelCase) for x in images] # now pad them to do the following operations __A ,__A : Any = self.pad(_UpperCAmelCase) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int: assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!" __A ,__A : int = box_size tensor[:, 0].clamp_(min=0 , max=__snake_case ) tensor[:, 1].clamp_(min=0 , max=__snake_case ) tensor[:, 2].clamp_(min=0 , max=__snake_case ) tensor[:, 3].clamp_(min=0 , max=__snake_case )
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency list of every vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Visit all vertices with a depth-first traversal."""
        # visited list to keep track of already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the current node as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
def compute_ap(graph):
    """Find and print all articulation points of an undirected graph given as
    an adjacency list (Hopcroft-Tarjan low-link computation)."""
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point iff it has more than one subtree
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
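# Expected output for `data` above: the articulation points 2, 3 and 5,
# printed one per line (removing any of them disconnects the graph).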
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit
    number (Project Euler 8), scanning with a sliding 13-digit window."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"""{solution() = }""")
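As a cross-check on the greedy window scan above, a brute-force pass over every 13-digit window gives the published Project Euler 8 answer:

brute_force = max(str_eval(N[i : i + 13]) for i in range(len(N) - 12))
print(brute_force)  # 23514624000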
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } lowercase__ : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]: for attribute in key.split('.' ): __A : int = getattr(__snake_case , __snake_case ) if weight_type is not None: __A : Optional[int] = getattr(__snake_case , __snake_case ).shape else: __A : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __A : Tuple = value elif weight_type == "weight_g": __A : Union[str, Any] = value elif weight_type == "weight_v": __A : Optional[Any] = value elif weight_type == "bias": __A : Optional[int] = value else: __A : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]: __A : Optional[Any] = [] __A : Any = fairseq_model.state_dict() __A : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __A : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , ) __A : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __A : int = True if "*" in mapped_key: __A : Any = name.split(__snake_case )[0].split('.' )[-2] __A : List[Any] = mapped_key.replace('*' , __snake_case ) if "weight_g" in name: __A : Optional[Any] = 'weight_g' elif "weight_v" in name: __A : Union[str, Any] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __A : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __A : Tuple = 'weight' else: __A : Dict = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int: __A : int = full_name.split('conv_layers.' )[-1] __A : List[str] = name.split('.' ) __A : Optional[int] = int(items[0] ) __A : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __A : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __A : Union[str, Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __A : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __A : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) @torch.no_grad() def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any: # load the pre-trained checkpoints __A : List[str] = torch.load(__snake_case ) __A : Dict = WavLMConfigOrig(checkpoint['cfg'] ) __A : Optional[int] = WavLMOrig(__snake_case ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __A : List[Any] = WavLMConfig.from_pretrained(__snake_case ) else: __A : Dict = WavLMConfig() __A : Optional[Any] = WavLMModel(__snake_case ) recursively_load_weights(__snake_case , __snake_case ) hf_wavlm.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowercase__ : Any = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
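A hypothetical direct invocation of the converter, assuming a local fairseq-style checkpoint file (both paths are illustrative, and the positional argument order follows the argparse wiring above):

# Loads WavLM-Base.pt, maps the fairseq weights onto WavLMModel, and saves the result.
convert_wavlm_checkpoint("WavLM-Base.pt", "wavlm-base-converted")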
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left to the bottom-right cell, moving
    in the four cardinal directions, avoiding blocked cells (1s) and revisits."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
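Two small grids to exercise the path counter (results verified by hand):

grid = [[0, 0], [0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2: right-then-down and down-then-right

blocked = [[0, 1], [0, 0]]
print(depth_first_search(blocked, 0, 0, set()))  # 1: only down-then-right remains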
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __A : Dict = sample_size # time if time_embedding_type == "fourier": __A : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase) __A : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": __A : List[str] = Timesteps( block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase) __A : List[str] = block_out_channels[0] if use_timestep_embedding: __A : Optional[Any] = block_out_channels[0] * 4 __A : Optional[int] = TimestepEmbedding( in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , ) __A : Dict = nn.ModuleList([]) __A : Dict = None __A : Tuple = nn.ModuleList([]) __A : Tuple = None # down __A : Any = in_channels for i, down_block_type in enumerate(_UpperCAmelCase): __A : Tuple = output_channel __A : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __A : List[str] = i == len(_UpperCAmelCase) - 1 __A : int = get_down_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_UpperCAmelCase) # mid __A : str = get_mid_block( _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , ) # up __A : Optional[int] = list(reversed(_UpperCAmelCase)) __A : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: __A : str = out_channels else: __A : List[Any] = block_out_channels[0] for i, up_block_type in enumerate(_UpperCAmelCase): __A : Optional[Any] = output_channel __A : Optional[Any] = ( reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels ) __A : Dict = i == len(_UpperCAmelCase) - 1 __A : str = get_up_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_UpperCAmelCase) __A : Optional[int] = output_channel # out __A : str = 
norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) __A : Optional[Any] = get_out_block( out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): '''simple docstring''' __A : Any = timestep if not torch.is_tensor(_UpperCAmelCase): __A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0: __A : Any = timesteps[None].to(sample.device) __A : List[Any] = self.time_proj(_UpperCAmelCase) if self.config.use_timestep_embedding: __A : Dict = self.time_mlp(_UpperCAmelCase) else: __A : Dict = timestep_embed[..., None] __A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) __A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down __A : int = () for downsample_block in self.down_blocks: __A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase) down_block_res_samples += res_samples # 3. mid if self.mid_block: __A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase) # 4. up for i, upsample_block in enumerate(self.up_blocks): __A : Any = down_block_res_samples[-1:] __A : Optional[int] = down_block_res_samples[:-1] __A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase) # 5. post-process if self.out_block: __A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase) if not return_dict: return (sample,) return UNetaDOutput(sample=_UpperCAmelCase)
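A minimal smoke test, assuming this file mirrors diffusers' UNet1DModel (the sample length is chosen small so the default three-block stack runs quickly; channel counts match the defaults):

import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
noisy_sample = torch.randn(1, 2, 256)  # (batch, channels, length)
out = model(noisy_sample, timestep=10).sample
print(out.shape)  # torch.Size([1, 2, 256])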
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which the two strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
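The classic worked example:

print(hamming_distance("karolin", "kathrin"))  # 3 differing positions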
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCAmelCase_ = parser.parse_args() if args.model_type == "bert": lowerCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) lowerCAmelCase_ = '''bert''' else: raise ValueError('args.model_type should be "bert".') lowerCAmelCase_ = model.state_dict() lowerCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: lowerCAmelCase_ = state_dict[f'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: lowerCAmelCase_ = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}'''] lowerCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] lowerCAmelCase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 lowerCAmelCase_ = state_dict['''cls.predictions.decoder.weight'''] lowerCAmelCase_ = state_dict['''cls.predictions.bias'''] if args.vocab_transform: for w in ["weight", "bias"]: lowerCAmelCase_ = state_dict[f'''cls.predictions.transform.dense.{w}'''] lowerCAmelCase_ = state_dict[f'''cls.predictions.transform.LayerNorm.{w}'''] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights to the RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
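A hypothetical direct call, mirroring the argparse entry point (the repo id is the one suggested in the help text; the output directory is illustrative):

convert_roberta_prelayernorm_checkpoint_to_pytorch(
    "andreasmadsen/efficient_mlm_m0.40", "roberta-prelayernorm-converted"
)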
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on the longest path in a DAG, computed with
    Kahn's topological sort (in-degree queue)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# Prints 5 (the longest path 0 -> 2 -> 5 -> 6 -> 7 has five vertices).
longest_distance(graph)
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
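Typical use in a normal installation, enabling generation-based metrics during evaluation:

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,  # run model.generate() in the eval loop
    generation_max_length=64,
    generation_num_beams=4,
)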
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
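The helpers' behavior, shown directly (semantics taken from the parametrized cases above):

from datasets.utils.sharding import _distribute_shards

# 10 shards over at most 3 jobs -> contiguous, near-equal ranges
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]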
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''lxmert''' lowerCAmelCase = {} def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ): '''simple docstring''' __A : Tuple = vocab_size __A : int = hidden_size __A : str = num_attention_heads __A : Tuple = hidden_act __A : int = intermediate_size __A : str = hidden_dropout_prob __A : Optional[int] = attention_probs_dropout_prob __A : Optional[Any] = max_position_embeddings __A : Tuple = type_vocab_size __A : Optional[int] = initializer_range __A : Any = layer_norm_eps __A : Optional[Any] = num_qa_labels __A : Optional[int] = num_object_labels __A : Any = num_attr_labels __A : Union[str, Any] = l_layers __A : Optional[int] = x_layers __A : List[Any] = r_layers __A : Tuple = visual_feat_dim __A : Tuple = visual_pos_dim __A : Optional[int] = visual_loss_normalizer __A : int = task_matched __A : List[Any] = task_mask_lm __A : Optional[Any] = task_obj_predict __A : str = task_qa __A : List[Any] = visual_obj_loss __A : Optional[Any] = visual_attr_loss __A : Union[str, Any] = visual_feat_loss __A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers} super().__init__(**_UpperCAmelCase)
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts line up key-for-key, so the weights are copied by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
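Two quick sanity checks for the function above (editor's illustration; Lagrange's four-square theorem guarantees the answer is at most 4 for any natural number):

assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
assert minimum_squares_to_represent_a_number(25) == 1  # 5**2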
import torch

from diffusers import DiffusionPipeline


class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Sample pure noise at the UNet's native resolution.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # One denoising step, then return a tensor of ones with the sample's shape.
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
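A usage sketch for the one-step pipeline above (editor's illustration; the model/scheduler pair and sizes are assumptions, and depending on the scheduler a set_timesteps call may be needed before stepping):

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32)  # hypothetical small UNet
scheduler = DDPMScheduler()
scheduler.set_timesteps(50)
pipe = CustomPipeline(unet=unet, scheduler=scheduler)
result = pipe()
print(result.shape)  # (1, 3, 32, 32), all ones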
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Only part of this item fits; take the fitting fraction and stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
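A worked example for the greedy routine above (editor's illustration; the values and weights are invented for the demo):

v = [60, 100, 120]
w = [10, 20, 30]
best, parts = fractional_knapsack(v, w, capacity=50)
print(best)   # 240.0 -> whole items 1 and 2, two thirds of item 3
print(parts)  # [1, 1, 0.6666666666666666]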
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        """Assign `val` on the interval [a, b] using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Maximum on the interval [a, b], pushing pending lazy updates down first."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search heads toward the other's frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # run the search so the timing is meaningful
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of the first `num_of_terms` terms of an arithmetic progression."""
    # formula for sum of series: n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
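A worked instance of the formula above (editor's illustration): with a = 1, d = 1, n = 10 the sum is (10/2) * (2*1 + 9*1) = 5 * 11 = 55.

assert sum_of_series(1, 1, 10) == 55.0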
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\\'e}sir{\\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse when the covariance matrix is singular.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given indent level, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # Line is part of the current block (e.g. a closing parenthesis).
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                # Line starts a new block.
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def perfect(number: int) -> bool:
    """A perfect number equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
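A few quick checks for the predicate above (editor's illustration): 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, while 27 is not.

assert perfect(6)
assert perfect(28)
assert not perfect(27)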
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
217
'''simple docstring'''
import mpmath  # for roots of unity
import numpy as np


class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        '''simple docstring'''
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        '''simple docstring'''
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    def __str__(self):
        '''simple docstring'''
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
0
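A short usage sketch for the FFT class above (using the names as reconstructed): multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x + 6x^2.

fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5, 6])
print(fft)          # pretty-prints A, B and the product A*B
print(fft.product)  # coefficients 4, 13, 28, 27, 18 (as rounded complex numbers)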
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowercase__ ( a__, unittest.TestCase ): '''simple docstring''' _snake_case = BioGptTokenizer _snake_case = False def UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCamelCase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = 'lower newer' UpperCamelCase = 'lower newer' return input_text, output_text def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = BioGptTokenizer(self.vocab_file , self.merges_file ) UpperCamelCase = 'lower' UpperCamelCase = ['low', 'er</w>'] UpperCamelCase = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase = tokens + ['<unk>'] UpperCamelCase = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase ) UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
212
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ): '''simple docstring''' __A : Union[str, Any] = parent __A : Tuple = batch_size __A : List[str] = image_size __A : Dict = patch_size __A : Optional[Any] = num_channels __A : Tuple = is_training __A : Dict = use_labels __A : List[Any] = hidden_size __A : Tuple = num_hidden_layers __A : int = num_attention_heads __A : Optional[int] = intermediate_size __A : Tuple = hidden_act __A : Any = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : List[Any] = type_sequence_label_size __A : List[Any] = initializer_range __A : Optional[int] = num_labels __A : List[Any] = scope __A : Any = n_targets __A : Union[str, Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __A : int = num_patches + 1 + self.num_detection_tokens def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) __A : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __A : List[Any] = [] for i in range(self.batch_size): __A : Optional[int] = {} __A : Union[str, Any] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase) __A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase) labels.append(_UpperCAmelCase) __A : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase): '''simple docstring''' __A : Any = YolosModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : Dict = model(_UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Any = YolosForObjectDetection(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() __A : str = model(pixel_values=_UpperCAmelCase) __A : List[str] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) __A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.prepare_config_and_inputs() __A ,__A ,__A : Tuple = config_and_inputs __A : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ): lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' __A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __A : Any = [] for i in range(self.model_tester.batch_size): __A : Tuple = {} __A : Tuple = torch.ones( size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long) __A : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float) labels.append(_UpperCAmelCase) __A : str = labels return inputs_dict def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = YolosModelTester(self) __A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Tuple = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[Any] = model_class(_UpperCAmelCase) __A : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : int = 
[*signature.parameters.keys()] __A : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common() __A : Optional[int] = True # in YOLOS, the seq_len is different __A : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __A : Dict = True __A : Dict = False __A : Union[str, Any] = True __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __A : List[Any] = True __A : List[str] = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __A : str = len(_UpperCAmelCase) # Check attention is always last and order is fine __A : Dict = True __A : Dict = True __A : Dict = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Union[str, Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase)) __A : Optional[Any] = outputs.attentions self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): __A : Tuple = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): __A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)) __A : Optional[Any] = outputs.hidden_states __A : List[str] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase) # YOLOS has a different seq_length __A : Dict = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) __A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : List[str] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A : Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase) @slow def 
SCREAMING_SNAKE_CASE ( self): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase) self.assertIsNotNone(_UpperCAmelCase) def _lowerCAmelCase ( ) -> int: __A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase) __A : Any = self.default_image_processor __A : str = prepare_img() __A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase) # forward pass with torch.no_grad(): __A : str = model(inputs.pixel_values) # verify outputs __A : Tuple = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape , _UpperCAmelCase) __A : Dict = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , ) __A : int = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4)) # verify postprocessing __A : List[str] = image_processor.post_process_object_detection( _UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0] __A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase) __A : Union[str, Any] = [75, 75, 17, 63, 17] __A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase) self.assertEqual(len(results['scores']) , 5) self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4)) self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase) self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
8
0
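A minimal inference sketch mirroring the YOLOS integration test above; the checkpoint and image URL are real, but network access is assumed.

import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# keep detections above a 0.9 score, rescaled to the original image size
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"], results["boxes"])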
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels a_ : List[str] = object() # For specifying empty leaf dict `{}` a_ : str = object() def a_ ( __snake_case : Any , __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__snake_case ) - len(__snake_case ) + 1 ): lowerCamelCase_ =[x.match(__snake_case ) for x, y in zip(__snake_case , ks[i:] )] if matches and all(__snake_case ): return True return False def a_ ( __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" def replace(__snake_case : int , __snake_case : Optional[Any] ): for rule, replacement in rules: if _match(__snake_case , __snake_case ): return replacement return val return replace def a_ ( ) -> Dict: """simple docstring""" return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __snake_case )), (("transformer", "wte", "embedding"), P('''mp''' , __snake_case )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __snake_case )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__snake_case , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __snake_case )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def a_ ( __snake_case : str ) -> Tuple: """simple docstring""" lowerCamelCase_ =_get_partition_rules() lowerCamelCase_ =_replacement_rules(__snake_case ) lowerCamelCase_ ={k: _unmatched for k in flatten_dict(__snake_case )} lowerCamelCase_ ={k: replace(__snake_case , __snake_case ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__snake_case ) )
676
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: lowercase__ : Optional[int] = None lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowercase__ : List[str] = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } lowercase__ : Dict = { '''camembert-base''': 5_12, } lowercase__ : str = '''▁''' class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] lowerCAmelCase = CamembertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ): '''simple docstring''' __A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) __A : List[str] = vocab_file __A : Optional[int] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A : Optional[Any] = [self.cls_token_id] __A : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' __A : Optional[int] = [self.sep_token_id] __A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(_UpperCAmelCase): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return __A : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase): copyfile(self.vocab_file , _UpperCAmelCase) return (out_vocab_file,)
8
0
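A short usage sketch for the fast Camembert tokenizer above; it downloads the camembert-base vocabulary from the Hub.

from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(encoded["input_ids"])                     # ids with <s> ... </s> special tokens added
print(tokenizer.decode(encoded["input_ids"]))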
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__ ( a__): UpperCamelCase_ = ["""image_processor""", """tokenizer"""] UpperCamelCase_ = """AutoImageProcessor""" UpperCamelCase_ = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ): '''simple docstring''' super().__init__(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor def __call__( self : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: SCREAMING_SNAKE_CASE : int = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if images is not None: SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: SCREAMING_SNAKE_CASE : int = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def __A ( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ): '''simple docstring''' return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def __A ( self : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ): '''simple docstring''' return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def __A ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
248
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase__ : Any = '''hf-internal-testing/tiny-random-bert''' lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase))) with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Any = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) self.assertTrue(os.path.isfile(_UpperCAmelCase)) # File is cached at the same place the second time. __A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase) # Using a specific revision to test the full commit hash. __A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223') self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): __A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase) with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): __A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa') with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : int = cached_file(_UpperCAmelCase , 'conf') def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'): __A : Any = cached_file(_UpperCAmelCase , 'conf') with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f: __A : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf'))) __A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) __A : List[str] = mock.Mock() __A : Dict = 500 __A : List[str] = {} __A : List[Any] = HTTPError __A : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head: __A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase) self.assertIsNone(_UpperCAmelCase) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase)) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt')) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'): get_file_from_repo('bert-base-case' , _UpperCAmelCase) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'): get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha') __A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase) # The name is the cached name which is not very easy to test, so instead we load the content. __A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read()) self.assertEqual(config['hidden_size'] , 768) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __A : Tuple = Path(_UpperCAmelCase) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase)) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
8
0
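A sketch of the caching utilities exercised by the tests above; the repo id is the same one the tests use.

from transformers.utils import cached_file, has_file

path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(path)  # resolved path inside the local HF cache

# returns None instead of raising when the entry is missing
missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "missing.txt",
    _raise_exceptions_for_missing_entries=False,
)
print(missing, has_file("hf-internal-testing/tiny-random-bert", "config.json"))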
def solution(n: int = 10) -> str:
    """simple docstring"""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # pow with a third argument is modular exponentiation, so only the last
    # n digits of 2**7830457 are ever carried through the computation
    number = 28_433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
87
'''simple docstring'''
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
8
0
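A short usage sketch for save_randomly_initialized_version above: extra keyword arguments flow through to AutoConfig.from_pretrained, so a shrunken, untrained model can be produced for smoke tests. The overrides shown are hypothetical examples for a T5 config.

save_randomly_initialized_version(
    "t5-small",         # pretrained config and tokenizer to copy
    "t5-small-random",  # where the randomly initialized weights are saved
    d_model=64,         # hypothetical size overrides for a tiny model
    num_layers=2,
)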
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''microsoft/trocr-base-handwritten''': ( '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json''' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class lowercase_ ( a__ ): __lowerCamelCase = "trocr" __lowerCamelCase = ["past_key_values"] __lowerCamelCase = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self , __A=50_265 , __A=1_024 , __A=12 , __A=16 , __A=4_096 , __A="gelu" , __A=512 , __A=0.1 , __A=0.0 , __A=0.0 , __A=2 , __A=0.02 , __A=0.0 , __A=True , __A=False , __A=True , __A=True , __A=1 , __A=0 , __A=2 , **__A , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ : List[str] =vocab_size SCREAMING_SNAKE_CASE_ : List[str] =d_model SCREAMING_SNAKE_CASE_ : List[Any] =decoder_layers SCREAMING_SNAKE_CASE_ : List[Any] =decoder_attention_heads SCREAMING_SNAKE_CASE_ : List[str] =decoder_ffn_dim SCREAMING_SNAKE_CASE_ : Union[str, Any] =activation_function SCREAMING_SNAKE_CASE_ : str =max_position_embeddings SCREAMING_SNAKE_CASE_ : Tuple =dropout SCREAMING_SNAKE_CASE_ : Union[str, Any] =attention_dropout SCREAMING_SNAKE_CASE_ : List[Any] =activation_dropout SCREAMING_SNAKE_CASE_ : Optional[int] =init_std SCREAMING_SNAKE_CASE_ : int =decoder_layerdrop SCREAMING_SNAKE_CASE_ : List[Any] =use_cache SCREAMING_SNAKE_CASE_ : str =scale_embedding SCREAMING_SNAKE_CASE_ : Optional[int] =use_learned_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[Any] =layernorm_embedding super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
443
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowercase__ : Any = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''tapas''' def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __A : Dict = vocab_size __A : Tuple = hidden_size __A : Any = num_hidden_layers __A : int = num_attention_heads __A : Tuple = hidden_act __A : Tuple = intermediate_size __A : List[Any] = hidden_dropout_prob __A : int = attention_probs_dropout_prob __A : List[str] = max_position_embeddings __A : Optional[int] = type_vocab_sizes __A : str = initializer_range __A : List[str] = layer_norm_eps # Fine-tuning task hyperparameters __A : List[str] = positive_label_weight __A : List[Any] = num_aggregation_labels __A : Optional[Any] = aggregation_loss_weight __A : Tuple = use_answer_as_supervision __A : List[str] = answer_loss_importance __A : Any = use_normalized_answer_loss __A : Any = huber_loss_delta __A : Union[str, Any] = temperature __A : Tuple = aggregation_temperature __A : Optional[Any] = use_gumbel_for_cells __A : List[str] = use_gumbel_for_aggregation __A : Tuple = average_approximation_function __A : List[str] = cell_selection_preference __A : Dict = answer_loss_cutoff __A : Union[str, Any] = max_num_rows __A : Optional[Any] = max_num_columns __A : int = average_logits_per_cell __A : Optional[Any] = select_one_column __A : int = allow_empty_column_selection __A : List[Any] = init_cell_selection_weights_to_zero __A : int = reset_position_index_per_cell __A : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters __A : Optional[Any] = aggregation_labels __A : List[str] = no_aggregation_label_index if isinstance(self.aggregation_labels , _UpperCAmelCase): __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
8
0
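A small sketch of the TrOCRConfig above; its attribute_map lets generic names such as num_attention_heads resolve to TrOCR's decoder-specific fields.

from transformers import TrOCRConfig

config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
print(config.hidden_size)          # 512, via the attribute_map
print(config.num_hidden_layers)    # 6
print(config.num_attention_heads)  # 8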
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _UpperCAmelCase ( A , A ): '''simple docstring''' UpperCAmelCase__ =XCLIPTextConfig() # derive patch size from model name UpperCAmelCase__ =model_name.find("patch" ) UpperCAmelCase__ =int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] ) UpperCAmelCase__ =XCLIPVisionConfig(patch_size=__snake_case , num_frames=__snake_case ) if "large" in model_name: UpperCAmelCase__ =768 UpperCAmelCase__ =3072 UpperCAmelCase__ =12 UpperCAmelCase__ =1024 UpperCAmelCase__ =4096 UpperCAmelCase__ =16 UpperCAmelCase__ =24 UpperCAmelCase__ =768 UpperCAmelCase__ =3072 if model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ =336 UpperCAmelCase__ =XCLIPConfig.from_text_vision_configs(__snake_case , __snake_case ) if "large" in model_name: UpperCAmelCase__ =768 return config def _UpperCAmelCase ( A ): '''simple docstring''' if name == "token_embedding.weight": UpperCAmelCase__ =name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" ) if name == "positional_embedding": UpperCAmelCase__ =name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "ln_1" in name: UpperCAmelCase__ =name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: UpperCAmelCase__ =name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: UpperCAmelCase__ =name.replace("c_fc" , "fc1" ) if "c_proj" in name: UpperCAmelCase__ =name.replace("c_proj" , "fc2" ) if name.startswith("transformer.resblocks" ): UpperCAmelCase__ =name.replace("transformer.resblocks" , "text_model.encoder.layers" ) if "attn.out_proj" in name and "message" not in name: UpperCAmelCase__ =name.replace("attn.out_proj" , "self_attn.out_proj" ) if "ln_final" in name: UpperCAmelCase__ =name.replace("ln_final" , "text_model.final_layer_norm" ) # visual encoder if name == "visual.class_embedding": UpperCAmelCase__ =name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" ) if name == "visual.positional_embedding": UpperCAmelCase__ =name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" ) if name.startswith("visual.transformer.resblocks" ): UpperCAmelCase__ =name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" ) if "visual.conv1" in name: UpperCAmelCase__ =name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" ) if "visual.ln_pre" in name: UpperCAmelCase__ =name.replace("visual.ln_pre" , "vision_model.pre_layernorm" ) if "visual.ln_post" in name: UpperCAmelCase__ =name.replace("visual.ln_post" , "vision_model.post_layernorm" ) if "visual.proj" in name: UpperCAmelCase__ =name.replace("visual.proj" , "visual_projection.weight" ) if "text_projection" in name: UpperCAmelCase__ =name.replace("text_projection" , "text_projection.weight" ) # things on top if "prompts_visual_proj" in name: UpperCAmelCase__ =name.replace("prompts_visual_proj" , "prompts_visual_projection" ) if "prompts_visual_ln" in name: UpperCAmelCase__ =name.replace("prompts_visual_ln" , "prompts_visual_layernorm" ) # mit if name == "mit.positional_embedding": UpperCAmelCase__ =name.replace("positional" , "position" ) if name.startswith("mit.resblocks" ): UpperCAmelCase__ =name.replace("mit.resblocks" , "mit.encoder.layers" ) # prompts generator if 
name.startswith("prompts_generator.norm" ): UpperCAmelCase__ =name.replace("prompts_generator.norm" , "prompts_generator.layernorm" ) return name def _UpperCAmelCase ( A , A ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase__ =orig_state_dict.pop(__snake_case ) if "attn.in_proj" in key: UpperCAmelCase__ =key.split("." ) if key.startswith("visual" ): UpperCAmelCase__ =key_split[3] UpperCAmelCase__ =config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: UpperCAmelCase__ =val[ :dim, : ] UpperCAmelCase__ =val[ dim : dim * 2, : ] UpperCAmelCase__ =val[ -dim:, : ] else: UpperCAmelCase__ =val[ :dim ] UpperCAmelCase__ =val[ dim : dim * 2 ] UpperCAmelCase__ =val[ -dim: ] else: if "weight" in key: UpperCAmelCase__ =val[ :dim, : ] UpperCAmelCase__ =val[ dim : dim * 2, : ] UpperCAmelCase__ =val[ -dim:, : ] else: UpperCAmelCase__ =val[:dim] UpperCAmelCase__ =val[ dim : dim * 2 ] UpperCAmelCase__ =val[-dim:] elif key.startswith("mit" ): UpperCAmelCase__ =key_split[2] UpperCAmelCase__ =config.vision_config.mit_hidden_size if "weight" in key: UpperCAmelCase__ =val[:dim, :] UpperCAmelCase__ =val[dim : dim * 2, :] UpperCAmelCase__ =val[-dim:, :] else: UpperCAmelCase__ =val[:dim] UpperCAmelCase__ =val[dim : dim * 2] UpperCAmelCase__ =val[-dim:] else: UpperCAmelCase__ =key_split[2] UpperCAmelCase__ =config.text_config.hidden_size if "weight" in key: UpperCAmelCase__ =val[:dim, :] UpperCAmelCase__ =val[ dim : dim * 2, : ] UpperCAmelCase__ =val[-dim:, :] else: UpperCAmelCase__ =val[:dim] UpperCAmelCase__ =val[ dim : dim * 2 ] UpperCAmelCase__ =val[-dim:] else: UpperCAmelCase__ =rename_key(__snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: UpperCAmelCase__ =val.T UpperCAmelCase__ =val return orig_state_dict def _UpperCAmelCase ( A ): '''simple docstring''' if num_frames == 8: UpperCAmelCase__ ='eating_spaghetti_8_frames.npy' elif num_frames == 16: UpperCAmelCase__ ='eating_spaghetti.npy' elif num_frames == 32: UpperCAmelCase__ ='eating_spaghetti_32_frames.npy' UpperCAmelCase__ =hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename=__snake_case , repo_type="dataset" , ) UpperCAmelCase__ =np.load(__snake_case ) return list(__snake_case ) def _UpperCAmelCase ( A , A=None , A=False ): '''simple docstring''' UpperCAmelCase__ ={ # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } UpperCAmelCase__ =model_to_url[model_name] UpperCAmelCase__ =8 if "16-frames" in model_name: UpperCAmelCase__ =16 elif "shot" in model_name: UpperCAmelCase__ =32 UpperCAmelCase__ =get_xclip_config(__snake_case , __snake_case ) UpperCAmelCase__ =XCLIPModel(__snake_case ) model.eval() if "drive" in checkpoint_url: UpperCAmelCase__ ='pytorch_model.bin' gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case ) UpperCAmelCase__ =torch.load(__snake_case , map_location="cpu" )['model'] else: UpperCAmelCase__ =torch.hub.load_state_dict_from_url(__snake_case )['model'] UpperCAmelCase__ =convert_state_dict(__snake_case , __snake_case ) UpperCAmelCase__ =XCLIPModel(__snake_case ) UpperCAmelCase__ =model.load_state_dict(__snake_case , strict=__snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() UpperCAmelCase__ =336 if model_name == 'xclip-large-patch14-16-frames' else 224 UpperCAmelCase__ =VideoMAEImageProcessor(size=__snake_case ) UpperCAmelCase__ =CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase__ =CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase__ =XCLIPProcessor(image_processor=__snake_case , tokenizer=__snake_case ) UpperCAmelCase__ =prepare_video(__snake_case ) UpperCAmelCase__ =processor( text=["playing sports", "eating spaghetti", "go shopping"] , videos=__snake_case , return_tensors="pt" , padding=__snake_case ) print("Shape of pixel values:" , inputs.pixel_values.shape ) with torch.no_grad(): UpperCAmelCase__ =model(**__snake_case ) # Verify outputs UpperCAmelCase__ =outputs.logits_per_video UpperCAmelCase__ =logits_per_video.softmax(dim=1 ) print("Probs:" , __snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": UpperCAmelCase__ =torch.tensor([[0.00_19, 0.99_51, 0.00_30]] ) elif model_name == "xclip-base-patch32-16-frames": UpperCAmelCase__ =torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] ) elif model_name == "xclip-base-patch16": UpperCAmelCase__ =torch.tensor([[0.00_83, 0.96_81, 0.02_36]] ) elif model_name == "xclip-base-patch16-16-frames": UpperCAmelCase__ =torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] ) elif model_name == "xclip-large-patch14": UpperCAmelCase__ 
=torch.tensor([[0.00_62, 0.98_64, 0.00_75]] ) elif model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ =torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": UpperCAmelCase__ =torch.tensor([[0.05_55, 0.89_14, 0.05_31]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": UpperCAmelCase__ =torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] ) elif model_name == "xclip-large-patch14-kinetics-600": UpperCAmelCase__ =torch.tensor([[0.00_36, 0.99_20, 0.00_45]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": UpperCAmelCase__ =torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": UpperCAmelCase__ =torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": UpperCAmelCase__ =torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": UpperCAmelCase__ =torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": UpperCAmelCase__ =torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": UpperCAmelCase__ =torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": UpperCAmelCase__ =torch.tensor([[0.00_27, 0.99_04, 0.00_70]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": UpperCAmelCase__ =torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": UpperCAmelCase__ =torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] ) else: raise ValueError(F"""Model name {model_name} not supported""" ) assert torch.allclose(__snake_case , __snake_case , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__snake_case ) if push_to_hub: print("Pushing model, processor and slow tokenizer files to the hub..." ) model.push_to_hub(__snake_case , organization="nielsr" ) processor.push_to_hub(__snake_case , organization="nielsr" ) slow_tokenizer.push_to_hub(__snake_case , organization="nielsr" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='xclip-base-patch32', type=str, help='Name of the model.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
625
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize): '''simple docstring''' __A : Union[str, Any] = 'bilinear' __A : int = max_size __A : Optional[Any] = short_edge_length def __call__( self , _UpperCAmelCase): '''simple docstring''' __A : int = [] for img in imgs: __A ,__A : Dict = img.shape[:2] # later: provide list and randomly choose index for resize __A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img __A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase) if h < w: __A ,__A : Optional[Any] = size, scale * w else: __A ,__A : Optional[Any] = scale * h, size if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size: __A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase) __A : Tuple = newh * scale __A : Dict = neww * scale __A : Dict = int(neww + 0.5) __A : Optional[int] = int(newh + 0.5) if img.dtype == np.uinta: __A : int = Image.fromarray(_UpperCAmelCase) __A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) __A : Dict = np.asarray(_UpperCAmelCase) else: __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw __A : Dict = nn.functional.interpolate( _UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0) img_augs.append(_UpperCAmelCase) return img_augs class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) __A : List[Any] = cfg.INPUT.FORMAT __A : Dict = cfg.SIZE_DIVISIBILITY __A : str = cfg.PAD_VALUE __A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST __A : int = cfg.MODEL.DEVICE __A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) __A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images])) __A : Dict = [im.shape[-2:] for im in images] __A : Optional[int] = [ nn.functional.pad( _UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_UpperCAmelCase , _UpperCAmelCase) ] return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase) def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False): '''simple docstring''' with torch.no_grad(): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : int = [images] if single_image: assert len(_UpperCAmelCase) == 1 for i in range(len(_UpperCAmelCase)): if isinstance(images[i] , torch.Tensor): images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( _UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge __A : str = torch.tensor([im.shape[:2] for im in images]) __A : List[str] = self.aug(_UpperCAmelCase) # transpose images and convert to 
torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __A : Any = [self.normalizer(_UpperCAmelCase) for x in images] # now pad them to do the following operations __A ,__A : Any = self.pad(_UpperCAmelCase) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int: assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!" __A ,__A : int = box_size tensor[:, 0].clamp_(min=0 , max=__snake_case ) tensor[:, 1].clamp_(min=0 , max=__snake_case ) tensor[:, 2].clamp_(min=0 , max=__snake_case ) tensor[:, 3].clamp_(min=0 , max=__snake_case )
8
0
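A minimal inference sketch for the kind of checkpoint the conversion script above produces; it assumes the microsoft/xclip-base-patch32 weights are on the Hub and uses eight random frames as a stand-in video.

import numpy as np
import torch
from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")

video = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))
inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"],
    videos=video,
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits_per_video.softmax(dim=1))  # one probability per candidate label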
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): snake_case_ : Dict = 0 def a__ ( self :List[str] ): snake_case_ : Dict = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) def a__ ( self :Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Union[str, Any] = Path(_UpperCAmelCase ) / 'preprocessor_config.json' snake_case_ : Dict = Path(_UpperCAmelCase ) / 'config.json' json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,) json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) ) snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) def a__ ( self :List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Tuple = Path(_UpperCAmelCase ) / 'preprocessor_config.json' snake_case_ : Tuple = Path(_UpperCAmelCase ) / 'config.json' json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,) json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) ) snake_case_ : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) def a__ ( self :Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = CLIPConfig() # Create a dummy config file with image_proceesor_type snake_case_ : Union[str, Any] = Path(_UpperCAmelCase ) / 'preprocessor_config.json' snake_case_ : List[Any] = Path(_UpperCAmelCase ) / 'config.json' json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,) json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally snake_case_ : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase ).to_dict() config_dict.pop("""image_processor_type""" ) snake_case_ : List[Any] = CLIPImageProcessor(**_UpperCAmelCase ) # save in new folder model_config.save_pretrained(_UpperCAmelCase ) config.save_pretrained(_UpperCAmelCase ) snake_case_ : str = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) # make sure private variable is not incorrectly saved snake_case_ : List[str] = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) def a__ ( self :Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Dict = Path(_UpperCAmelCase ) / 'preprocessor_config.json' json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,) snake_case_ : 
int = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) def a__ ( self :int ): with self.assertRaisesRegex( _UpperCAmelCase ,"""clip-base is not a local folder and is not a valid model identifier""" ): snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""clip-base""" ) def a__ ( self :Tuple ): with self.assertRaisesRegex( _UpperCAmelCase ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ,revision="""aaaaaa""" ) def a__ ( self :Optional[int] ): with self.assertRaisesRegex( _UpperCAmelCase ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,): snake_case_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def a__ ( self :Optional[int] ): with self.assertRaises(_UpperCAmelCase ): snake_case_ : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(_UpperCAmelCase ): snake_case_ : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase ) snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_UpperCAmelCase ) snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ,trust_remote_code=_UpperCAmelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" ) def a__ ( self :Optional[int] ): try: AutoConfig.register("""custom""" ,_UpperCAmelCase ) AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[int] = Path(_UpperCAmelCase ) / 'preprocessor_config.json' snake_case_ : Optional[Any] = Path(_UpperCAmelCase ) / 'config.json' json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,) json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) ) snake_case_ : Union[str, Any] = CustomImageProcessor.from_pretrained(_UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_UpperCAmelCase ) snake_case_ : List[str] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def a__ ( self :Any ): class __UpperCamelCase ( a__ ): lowercase : str = True try: AutoConfig.register("""custom""" ,_UpperCAmelCase ) AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase ) # If remote code is not set, the default is to use 
local snake_case_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. snake_case_ : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub snake_case_ : Tuple = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" ) self.assertTrue(not hasattr(_UpperCAmelCase ,"""is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
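A minimal sketch of the API these tests exercise (requires Hub access; the checkpoint id comes from the first test above):

from transformers import AutoImageProcessor, CLIPImageProcessor

# Resolves the concrete processor class from the checkpoint's preprocessor_config.json.
image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
assert isinstance(image_processor, CLIPImageProcessor)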
334
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point
            # iff it has more than one outgoing tree edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
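For the graph above, the expected result (reasoned from the definition, not captured program output):

# Vertex 2 separates {0, 1} from {3, 4} and {5, 6, 7, 8}, vertex 3 isolates
# vertex 4, and vertex 5 is the only link between the 6-7-8 cycle and the rest,
# so compute_ap(data) prints the articulation points:
# 2
# 3
# 5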
8
0
'''simple docstring''' import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCAmelCase_ : Optional[int] = data_utils.TransfoXLTokenizer lowerCAmelCase_ : str = data_utils.TransfoXLCorpus lowerCAmelCase_ : Union[str, Any] = data_utils lowerCAmelCase_ : int = data_utils def _lowerCamelCase (__lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ) -> Optional[Any]: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__snake_case , "rb" ) as fp: a__ = pickle.load(__snake_case , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) a__ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) a__ = corpus.vocab.__dict__ torch.save(__snake_case , __snake_case ) a__ = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __snake_case ) a__ = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(__snake_case , __snake_case ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model a__ = os.path.abspath(__snake_case ) a__ = os.path.abspath(__snake_case ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": a__ = TransfoXLConfig() else: a__ = TransfoXLConfig.from_json_file(__snake_case ) print(f'''Building PyTorch model from configuration: {config}''' ) a__ = TransfoXLLMHeadModel(__snake_case ) a__ = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case ) # Save pytorch-model a__ = os.path.join(__snake_case , __snake_case ) a__ = os.path.join(__snake_case , __snake_case ) print(f'''Save PyTorch model to {os.path.abspath(__snake_case )}''' ) torch.save(model.state_dict() , __snake_case ) print(f'''Save configuration file to {os.path.abspath(__snake_case )}''' ) with open(__snake_case , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase_ : List[Any] = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." 
), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) lowerCAmelCase_ : int = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
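A hypothetical invocation of the converter above, matching the positional order of the script's final call (all paths are placeholders):

convert_transfo_xl_checkpoint_to_pytorch(
    "/path/to/transfo_xl_ckpt",  # tf_checkpoint_path, or "" to skip TF conversion
    "/path/to/config.json",      # transfo_xl_config_file, or "" for the default config
    "/path/to/output_dir",       # pytorch_dump_folder_path
    "",                          # transfo_xl_dataset_file, or a corpus pickle to convert
)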
489
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : int = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } lowercase__ : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]: for attribute in key.split('.' ): __A : int = getattr(__snake_case , __snake_case ) if weight_type is not None: __A : Optional[int] = getattr(__snake_case , __snake_case ).shape else: __A : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": __A : Tuple = value elif weight_type == "weight_g": __A : Union[str, Any] = value elif weight_type == "weight_v": __A : Optional[Any] = value elif weight_type == "bias": __A : Optional[int] = value else: __A : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]: __A : Optional[Any] = [] __A : Any = fairseq_model.state_dict() __A : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __A : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , ) __A : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __A : int = True if "*" in mapped_key: __A : Any = name.split(__snake_case )[0].split('.' )[-2] __A : List[Any] = mapped_key.replace('*' , __snake_case ) if "weight_g" in name: __A : Optional[Any] = 'weight_g' elif "weight_v" in name: __A : Union[str, Any] = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __A : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __A : Tuple = 'weight' else: __A : Dict = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int: __A : int = full_name.split('conv_layers.' )[-1] __A : List[str] = name.split('.' ) __A : Optional[int] = int(items[0] ) __A : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __A : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __A : Union[str, Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __A : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __A : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(__snake_case ) @torch.no_grad() def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any: # load the pre-trained checkpoints __A : List[str] = torch.load(__snake_case ) __A : Dict = WavLMConfigOrig(checkpoint['cfg'] ) __A : Optional[int] = WavLMOrig(__snake_case ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __A : List[Any] = WavLMConfig.from_pretrained(__snake_case ) else: __A : Dict = WavLMConfig() __A : Optional[Any] = WavLMModel(__snake_case ) recursively_load_weights(__snake_case , __snake_case ) hf_wavlm.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowercase__ : Any = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
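A hypothetical call into the converter above, following the argument order of the script's final line (paths are placeholders):

convert_wavlm_checkpoint(
    "/path/to/wavlm_fairseq.pt",  # checkpoint_path
    "/path/to/hf_output_dir",     # pytorch_dump_folder_path
    None,                         # config_path; falls back to a default WavLMConfig
)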
8
0
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the least count of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
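With the variable names restored, two quick checks:

# 12 = 4 + 4 + 4 and no pair of squares sums to 12, so the answer is 3.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(21) == 3  # 16 + 4 + 1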
330
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (a__ , a__ ): @register_to_config def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ): '''simple docstring''' super().__init__() __A : Dict = sample_size # time if time_embedding_type == "fourier": __A : int = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase) __A : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": __A : List[str] = Timesteps( block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase) __A : List[str] = block_out_channels[0] if use_timestep_embedding: __A : Optional[Any] = block_out_channels[0] * 4 __A : Optional[int] = TimestepEmbedding( in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , ) __A : Dict = nn.ModuleList([]) __A : Dict = None __A : Tuple = nn.ModuleList([]) __A : Tuple = None # down __A : Any = in_channels for i, down_block_type in enumerate(_UpperCAmelCase): __A : Tuple = output_channel __A : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __A : List[str] = i == len(_UpperCAmelCase) - 1 __A : int = get_down_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(_UpperCAmelCase) # mid __A : str = get_mid_block( _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , ) # up __A : Optional[int] = list(reversed(_UpperCAmelCase)) __A : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: __A : str = out_channels else: __A : List[Any] = block_out_channels[0] for i, up_block_type in enumerate(_UpperCAmelCase): __A : Optional[Any] = output_channel __A : Optional[Any] = ( reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels ) __A : Dict = i == len(_UpperCAmelCase) - 1 __A : str = get_up_block( _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(_UpperCAmelCase) __A : Optional[int] = output_channel # out __A : str = 
norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) __A : Optional[Any] = get_out_block( out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): '''simple docstring''' __A : Any = timestep if not torch.is_tensor(_UpperCAmelCase): __A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0: __A : Any = timesteps[None].to(sample.device) __A : List[Any] = self.time_proj(_UpperCAmelCase) if self.config.use_timestep_embedding: __A : Dict = self.time_mlp(_UpperCAmelCase) else: __A : Dict = timestep_embed[..., None] __A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) __A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down __A : int = () for downsample_block in self.down_blocks: __A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase) down_block_res_samples += res_samples # 3. mid if self.mid_block: __A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase) # 4. up for i, upsample_block in enumerate(self.up_blocks): __A : Any = down_block_res_samples[-1:] __A : Optional[int] = down_block_res_samples[:-1] __A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase) # 5. post-process if self.out_block: __A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase) if not return_dict: return (sample,) return UNetaDOutput(sample=_UpperCAmelCase)
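The obfuscated class appears to mirror diffusers' UNet1DModel (same defaults and forward signature); a smoke-test sketch under that assumption:

import torch
from diffusers import UNet1DModel

model = UNet1DModel()                 # defaults match the signature above
sample = torch.randn(1, 2, 16384)     # (batch, in_channels, length)
timestep = torch.tensor([10])
out = model(sample, timestep).sample  # UNet1DOutput.sample, same shape as the input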
8
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
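The `_LazyModule` indirection means submodules only load on first attribute access; the user-facing behaviour, assuming a standard transformers install:

from transformers import M2M100Config, M2M100Tokenizer  # resolved lazily on import

config = M2M100Config()  # default M2M100 architecture hyperparameters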
311
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
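Two quick checks against the fixed function:

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "1111") == 4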
8
0
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _A ( unittest.TestCase ): def __a ( self : List[Any] ) -> Any: """simple docstring""" lowercase : str = inspect.getfile(accelerate.test_utils ) lowercase : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) lowercase : List[str] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) lowercase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def __a ( self : Any ) -> List[str]: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices.""" ) lowercase : Optional[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __a ( self : Dict ) -> int: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices.""" ) lowercase : Dict = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __a ( self : Any ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) lowercase : Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase_ = Accelerator() lowerCAmelCase_ = (accelerator.state.process_index + 2, 10) lowerCAmelCase_ = torch.randint(0, 10, shape).to(accelerator.device) lowerCAmelCase_ = '''''' lowerCAmelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. 
if len(error_msg) > 0: raise ValueError(error_msg)
217
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
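A hypothetical invocation; the repo id comes from the script's own help text, the output path is a placeholder:

convert_roberta_prelayernorm_checkpoint_to_pytorch(
    "andreasmadsen/efficient_mlm_m0.40",  # checkpoint repo on the Hub
    "/path/to/output_dir",                # pytorch_dump_folder_path
)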
8
0
'''simple docstring''' from math import sqrt def __snake_case ( _UpperCAmelCase : int): assert isinstance(__snake_case, __snake_case) and ( number >= 0 ), "'number' must been an int and positive" UpperCamelCase = True # 0 and 1 are none primes. if number <= 1: UpperCamelCase = False for divisor in range(2, int(round(sqrt(__snake_case))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: UpperCamelCase = False break # precondition assert isinstance(__snake_case, __snake_case), "'status' must been from type bool" return status def __snake_case ( _UpperCAmelCase : Tuple): assert isinstance(__snake_case, __snake_case) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N UpperCamelCase = list(range(2, n + 1)) UpperCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(__snake_case)): for j in range(i + 1, len(__snake_case)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): UpperCamelCase = 0 # filters actual prime numbers. UpperCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(__snake_case, __snake_case), "'ans' must been from type list" return ans def __snake_case ( _UpperCAmelCase : str): assert isinstance(__snake_case, __snake_case) and (n > 2), "'N' must been an int and > 2" UpperCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(__snake_case): ans.append(__snake_case) # precondition assert isinstance(__snake_case, __snake_case), "'ans' must been from type list" return ans def __snake_case ( _UpperCAmelCase : List[Any]): assert isinstance(__snake_case, __snake_case) and number >= 0, "'number' must been an int and >= 0" UpperCamelCase = [] # this list will be returns of the function. # potential prime number factors. 
UpperCamelCase = 2 UpperCamelCase = number if number == 0 or number == 1: ans.append(__snake_case) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(__snake_case): while quotient != 1: if is_prime(__snake_case) and (quotient % factor == 0): ans.append(__snake_case) quotient /= factor else: factor += 1 else: ans.append(__snake_case) # precondition assert isinstance(__snake_case, __snake_case), "'ans' must been from type list" return ans def __snake_case ( _UpperCAmelCase : str): assert isinstance(__snake_case, __snake_case) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase = 0 # prime factorization of 'number' UpperCamelCase = prime_factorization(__snake_case) UpperCamelCase = max(__snake_case) # precondition assert isinstance(__snake_case, __snake_case), "'ans' must been from type int" return ans def __snake_case ( _UpperCAmelCase : Optional[Any]): assert isinstance(__snake_case, __snake_case) and ( number >= 0 ), "'number' bust been an int and >= 0" UpperCamelCase = 0 # prime factorization of 'number' UpperCamelCase = prime_factorization(__snake_case) UpperCamelCase = min(__snake_case) # precondition assert isinstance(__snake_case, __snake_case), "'ans' must been from type int" return ans def __snake_case ( _UpperCAmelCase : Union[str, Any]): assert isinstance(__snake_case, __snake_case), "'number' must been an int" assert isinstance(number % 2 == 0, __snake_case), "compare bust been from type bool" return number % 2 == 0 def __snake_case ( _UpperCAmelCase : List[str]): assert isinstance(__snake_case, __snake_case), "'number' must been an int" assert isinstance(number % 2 != 0, __snake_case), "compare bust been from type bool" return number % 2 != 0 def __snake_case ( _UpperCAmelCase : Union[str, Any]): assert ( isinstance(__snake_case, __snake_case) and (number > 2) and is_even(__snake_case) ), "'number' must been an int, even and > 2" UpperCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' UpperCamelCase = get_prime_numbers(__snake_case) UpperCamelCase = len(__snake_case) # run variable for while-loops. UpperCamelCase = 0 UpperCamelCase = None # exit variable. for break up the loops UpperCamelCase = True while i < len_pn and loop: UpperCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: UpperCamelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(__snake_case, __snake_case) and (len(__snake_case) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __snake_case ( _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int): assert ( isinstance(__snake_case, __snake_case) and isinstance(__snake_case, __snake_case) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." UpperCamelCase = 0 while numbera != 0: UpperCamelCase = numbera % numbera UpperCamelCase = numbera UpperCamelCase = rest # precondition assert isinstance(__snake_case, __snake_case) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __snake_case ( _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[Any]): assert ( isinstance(__snake_case, __snake_case) and isinstance(__snake_case, __snake_case) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." 
UpperCamelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' UpperCamelCase = prime_factorization(__snake_case) UpperCamelCase = prime_factorization(__snake_case) elif numbera == 1 or numbera == 1: UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = max(__snake_case, __snake_case) UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: UpperCamelCase = prime_fac_a.count(__snake_case) UpperCamelCase = prime_fac_a.count(__snake_case) for _ in range(max(__snake_case, __snake_case)): ans *= n else: UpperCamelCase = prime_fac_a.count(__snake_case) for _ in range(__snake_case): ans *= n done.append(__snake_case) # iterates through primeFac2 for n in prime_fac_a: if n not in done: UpperCamelCase = prime_fac_a.count(__snake_case) for _ in range(__snake_case): ans *= n done.append(__snake_case) # precondition assert isinstance(__snake_case, __snake_case) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __snake_case ( _UpperCAmelCase : Optional[Any]): assert isinstance(__snake_case, __snake_case) and (n >= 0), "'number' must been a positive int" UpperCamelCase = 0 UpperCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(__snake_case): ans += 1 # precondition assert isinstance(__snake_case, __snake_case) and is_prime( __snake_case), "'ans' must been a prime number and from type int" return ans def __snake_case ( _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[int]): assert ( is_prime(__snake_case) and is_prime(__snake_case) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" UpperCamelCase = p_number_a + 1 # jump to the next number UpperCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(__snake_case): number += 1 while number < p_number_a: ans.append(__snake_case) number += 1 # fetch the next prime number. while not is_prime(__snake_case): number += 1 # precondition assert ( isinstance(__snake_case, __snake_case) and ans[0] != p_number_a and ans[len(__snake_case) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __snake_case ( _UpperCAmelCase : Dict): assert isinstance(__snake_case, __snake_case) and (n >= 1), "'n' must been int and >= 1" UpperCamelCase = [] # will be returned. 
for divisor in range(1, n + 1): if n % divisor == 0: ans.append(__snake_case) # precondition assert ans[0] == 1 and ans[len(__snake_case) - 1] == n, "Error in function getDivisiors(...)" return ans def __snake_case ( _UpperCAmelCase : Union[str, Any]): assert isinstance(__snake_case, __snake_case) and ( number > 1 ), "'number' must been an int and >= 1" UpperCamelCase = get_divisors(__snake_case) # precondition assert ( isinstance(__snake_case, __snake_case) and (divisors[0] == 1) and (divisors[len(__snake_case) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __snake_case ( _UpperCAmelCase : Any, _UpperCAmelCase : int): assert ( isinstance(__snake_case, __snake_case) and isinstance(__snake_case, __snake_case) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. UpperCamelCase = gcd(abs(__snake_case), abs(__snake_case)) # precondition assert ( isinstance(__snake_case, __snake_case) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __snake_case ( _UpperCAmelCase : Tuple): assert isinstance(__snake_case, __snake_case) and (n >= 0), "'n' must been a int and >= 0" UpperCamelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __snake_case ( _UpperCAmelCase : str): assert isinstance(__snake_case, __snake_case) and (n >= 0), "'n' must been an int and >= 0" UpperCamelCase = 0 UpperCamelCase = 1 UpperCamelCase = 1 # this will be return for _ in range(n - 1): UpperCamelCase = ans ans += fiba UpperCamelCase = tmp return ans
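Assuming the obfuscated helpers get their intended names back (is_prime, goldbach, and kg_v for the least common multiple; all hypothetical labels here), a few sanity checks:

assert is_prime(97) and not is_prime(1)
assert goldbach(28) == [5, 23]  # first prime pair the scan finds
assert kg_v(8, 10) == 40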
212
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # GenerationConfig values are not JSON-serializable as-is, so flatten them first.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
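A minimal construction sketch (output_dir is the only required argument inherited from TrainingArguments; the values are illustrative):

args = Seq2SeqTrainingArguments(
    output_dir="/tmp/seq2seq_run",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)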
8
0
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =mock.Mock() lowerCamelCase_ =500 lowerCamelCase_ ={} lowerCamelCase_ =HTTPError lowerCamelCase_ ={} # Download this model to make sure it's in the cache. lowerCamelCase_ =BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''', return_value=_UpperCAmelCase ) as mock_head: lowerCamelCase_ =BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =mock.Mock() lowerCamelCase_ =500 lowerCamelCase_ ={} lowerCamelCase_ =HTTPError lowerCamelCase_ ={} # Download this model to make sure it's in the cache. lowerCamelCase_ =GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''', return_value=_UpperCAmelCase ) as mock_head: lowerCamelCase_ =GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def lowercase__ ( self ): """simple docstring""" try: lowerCamelCase_ =tempfile.mktemp() with open(_UpperCAmelCase, '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', _UpperCAmelCase ) lowerCamelCase_ =AlbertTokenizer.from_pretrained(_UpperCAmelCase ) finally: os.remove(_UpperCAmelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''', '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', _UpperCAmelCase ) lowerCamelCase_ =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('''tokenizer.json''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class __UpperCamelCase ( unittest.TestCase ): lowercase : Any =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =TOKEN HfFolder.save_token(_UpperCAmelCase ) @classmethod def lowercase__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token, repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def lowercase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(_UpperCAmelCase, '''vocab.txt''' ) with open(_UpperCAmelCase, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowerCamelCase_ =BertTokenizer(_UpperCAmelCase ) tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token ) lowerCamelCase_ =BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase, repo_id='''test-tokenizer''', push_to_hub=_UpperCAmelCase, use_auth_token=self._token ) lowerCamelCase_ =BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) def lowercase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(_UpperCAmelCase, '''vocab.txt''' ) with open(_UpperCAmelCase, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowerCamelCase_ =BertTokenizer(_UpperCAmelCase ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token ) lowerCamelCase_ =BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _UpperCAmelCase, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=_UpperCAmelCase, use_auth_token=self._token ) lowerCamelCase_ =BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) @require_tokenizers def lowercase__ ( self ): """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(_UpperCAmelCase, '''vocab.txt''' ) with open(_UpperCAmelCase, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowerCamelCase_ =CustomTokenizer(_UpperCAmelCase ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token ) lowerCamelCase_ =AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase ) # Can't make an 
isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ =os.path.join(_UpperCAmelCase, '''vocab.txt''' ) with open(_UpperCAmelCase, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowerCamelCase_ =BertTokenizerFast.from_pretrained(_UpperCAmelCase ) bert_tokenizer.save_pretrained(_UpperCAmelCase ) lowerCamelCase_ =CustomTokenizerFast.from_pretrained(_UpperCAmelCase ) tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token ) lowerCamelCase_ =AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''' ) lowerCamelCase_ =AutoTokenizer.from_pretrained( f'''{USER}/test-dynamic-tokenizer''', use_fast=_UpperCAmelCase, trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' ) class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS] This is a extra_id_100'''] ) trie.add('''[CLS]''' ) trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ), ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ), ['''BC''', '''A'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ), ['''AB''', '''C'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ), ['''ABC''', '''D'''] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Trie() lowerCamelCase_ =trie.cut_text('''ABC''', [0, 0, 2, 1, 2, 3] ) self.assertEqual(_UpperCAmelCase, 
['''AB''', '''C'''] )
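A minimal sketch of the longest-match splitting behaviour the last tests pin down:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("AB")
trie.add("B")
trie.add("C")
assert trie.split("ABC") == ["AB", "C"]  # "AB" wins over "B" at position 1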
676
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
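A quick construction sketch, assuming the restored names above:

config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2)
assert config.num_hidden_layers == {"vision": 2, "cross_encoder": 2, "language": 3}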
8
0
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two minterms if they differ in at most one bit; otherwise return False.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # Repeatedly merge minterms; terms that can no longer be merged are prime implicants.
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    # Convert each minterm to a fixed-width binary string.
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    # A prime implicant covers a minterm when the differing positions equal its '_' count.
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # A minterm covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining minterms with the implicant covering the most of them.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
248
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    # Count the minimum number of perfect squares that sum to `number`
    # (bottom-up dynamic programming over all values up to `number`).
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
0
def naive_cut_rod_recursive(n: int, prices: list):
    # Exhaustive recursive solution (exponential time).
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    # Memoized top-down dynamic programming solution.
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    # Iterative bottom-up dynamic programming solution.
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
87
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    # Sort item indices by value-to-weight ratio, best first.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction of the item that still fits.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
0
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
443
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # Push any pending lazy value down before recursing.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # Push any pending lazy value down before answering the query.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
8
0
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
625
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # Formula for the sum of an arithmetic progression: n/2 * (2a + (n - 1)d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
0
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
334
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
8
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
489
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
8
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
330
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors (e.g. 6 = 1 + 2 + 3).
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
8
0